/**
 * Compute the row stride in bytes for a compressed texture image.
 *
 * The result covers `width` pixels rounded up to whole compressed blocks,
 * and is never smaller than `minStride` (itself rounded up to a whole
 * number of blocks).
 */
static unsigned get_aligned_compressed_row_stride( mesa_format format, unsigned width, unsigned minStride)
{
   const unsigned bytes_per_block = _mesa_get_format_bytes(format);
   unsigned block_w, block_h;
   unsigned pitch;

   _mesa_get_format_block_size(format, &block_w, &block_h);

   /* Bytes needed for the row of blocks covering `width` pixels. */
   pitch = bytes_per_block * ((width + block_w - 1) / block_w);

   /* Honor the caller's minimum stride, rounded up to a block multiple. */
   if (pitch < minStride) {
      pitch = bytes_per_block *
              ((minStride + bytes_per_block - 1) / bytes_per_block);
   }

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
                "%s width %u, minStride %u, block(bytes %u, width %u):"
                "stride %u\n",
                __func__, width, minStride,
                bytes_per_block, block_w, pitch);

   return pitch;
}
/**
 * Top-level miptree layout: pick alignment units, compute the total 2D
 * footprint, and (unless the miptree wraps a caller-provided BO) choose a
 * tiling mode.
 *
 * If the computed total width or height is zero the miptree is released
 * via intel_miptree_release() and the function returns early.
 */
void
brw_miptree_layout(struct brw_context *brw,
                   struct intel_mipmap_tree *mt,
                   uint32_t layout_flags)
{
   mt->tr_mode = INTEL_MIPTREE_TRMODE_NONE;

   intel_miptree_set_alignment(brw, mt, layout_flags);
   intel_miptree_set_total_width_height(brw, mt);

   if (!mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return;
   }

   /* On Gen9+ the alignment values are expressed in multiples of the block
    * size, so convert align_w/align_h from pixels to block units here.
    */
   if (brw->gen >= 9) {
      unsigned int i, j;

      _mesa_get_format_block_size(mt->format, &i, &j);
      mt->align_w /= i;
      mt->align_h /= j;
   }

   if ((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0)
      mt->tiling = brw_miptree_choose_tiling(brw, mt, layout_flags);
}
/**
 * This is the software fallback for Driver.GetCompressedTexImage().
 * All error checking will have been done before this routine is called.
 *
 * Maps the texture image (and, when packing into a PBO, the pack buffer),
 * then copies the compressed data either in one memcpy (when the stored
 * and requested row strides match) or row-of-blocks by row-of-blocks.
 *
 * Fix: the GL_OUT_OF_MEMORY error strings previously said
 * "glGetCompresssedTexImage" (typo, triple 's').
 */
void
_mesa_get_compressed_teximage(struct gl_context *ctx,
                              struct gl_texture_image *texImage,
                              GLvoid *img)
{
   const GLuint row_stride =
      _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
   GLuint i;
   GLubyte *src;
   GLint srcRowStride;

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      /* pack texture image into a PBO */
      GLubyte *buf = (GLubyte *)
         ctx->Driver.MapBufferRange(ctx, 0, ctx->Pack.BufferObj->Size,
                                    GL_MAP_WRITE_BIT, ctx->Pack.BufferObj);
      if (!buf) {
         /* out of memory or other unexpected error */
         _mesa_error(ctx, GL_OUT_OF_MEMORY,
                     "glGetCompressedTexImage(map PBO failed)");
         return;
      }
      img = ADD_POINTERS(buf, img);
   }

   /* map src texture buffer */
   ctx->Driver.MapTextureImage(ctx, texImage, 0,
                               0, 0, texImage->Width, texImage->Height,
                               GL_MAP_READ_BIT, &src, &srcRowStride);

   if (src) {
      /* no pixelstore or pixel transfer, but respect stride */

      if (row_stride == srcRowStride) {
         /* Contiguous: one copy of the whole image. */
         const GLuint size = _mesa_format_image_size(texImage->TexFormat,
                                                     texImage->Width,
                                                     texImage->Height,
                                                     texImage->Depth);
         memcpy(img, src, size);
      }
      else {
         /* Copy one row of compressed blocks at a time. */
         GLuint bw, bh;
         _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
         for (i = 0; i < (texImage->Height + bh - 1) / bh; i++) {
            memcpy((GLubyte *)img + i * row_stride,
                   (GLubyte *)src + i * srcRowStride,
                   row_stride);
         }
      }

      ctx->Driver.UnmapTextureImage(ctx, texImage, 0);
   }
   else {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glGetCompressedTexImage");
   }

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj);
   }
}
/**
 * Compute the total byte size of a texture image.
 *
 * Compressed formats count rows of blocks instead of rows of pixels;
 * tiled linear formats round the height up to the tile height
 * (assumes the tile height is a power of two — the mask trick relies
 * on it).
 */
unsigned get_texture_image_size(
		mesa_format format,
		unsigned rowStride,
		unsigned height,
		unsigned depth,
		unsigned tiling)
{
   unsigned rows = height;

   if (_mesa_is_format_compressed(format)) {
      unsigned blockWidth, blockHeight;

      _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
      /* number of rows of blocks needed to hold `height` pixel rows */
      rows = (height + blockHeight - 1) / blockHeight;
   } else if (tiling) {
      /* Need to align height to tile height */
      unsigned tileWidth, tileHeight;

      get_tile_size(format, &tileWidth, &tileHeight);
      rows = (height + tileHeight - 1) & ~(tileHeight - 1);
   }

   return rowStride * rows * depth;
}
/**
 * Decompress a compressed texture image, returning a GL_RGBA/GL_FLOAT image.
 * \param srcRowStride  stride in bytes between rows of blocks in the
 *                      compressed source image.
 * \param dest  destination buffer; receives width*height RGBA texels
 *              (4 floats each), written row-major.
 */
void
_mesa_decompress_image(mesa_format format, GLuint width, GLuint height,
                       const GLubyte *src, GLint srcRowStride,
                       GLfloat *dest)
{
   compressed_fetch_func fetch;
   GLuint i, j;
   GLuint bytes, bw, bh;
   GLint stride;

   bytes = _mesa_get_format_bytes(format);
   _mesa_get_format_block_size(format, &bw, &bh);

   fetch = _mesa_get_compressed_fetch_func(format);
   if (!fetch) {
      _mesa_problem(NULL, "Unexpected format in _mesa_decompress_image()");
      return;
   }

   /* Convert the byte stride between block rows into the texel row stride
    * that the fetch function expects.
    */
   stride = srcRowStride * bh / bytes;

   for (j = 0; j < height; j++) {
      for (i = 0; i < width; i++) {
         fetch(src, stride, i, j, dest);
         dest += 4; /* advance by one RGBA texel */
      }
   }
}
/**
 * Copy a region between two miptrees.
 *
 * On Gen6+ the copy is done with BLORP. On older hardware we first do a
 * full HiZ/depth/color resolve of both miptrees, then try the blitter,
 * and finally fall back to a map-and-memcpy path.
 */
static void
copy_miptrees(struct brw_context *brw,
              struct intel_mipmap_tree *src_mt,
              int src_x, int src_y, int src_z, unsigned src_level,
              struct intel_mipmap_tree *dst_mt,
              int dst_x, int dst_y, int dst_z, unsigned dst_level,
              int src_width, int src_height)
{
   unsigned bw, bh;

   if (brw->gen >= 6) {
      brw_blorp_copy_miptrees(brw,
                              src_mt, src_level, src_z,
                              dst_mt, dst_level, dst_z,
                              src_x, src_y, dst_x, dst_y,
                              src_width, src_height);
      return;
   }

   /* We are now going to try and copy the texture using the blitter.  If
    * that fails, we will fall back mapping the texture and using memcpy.
    * In either case, we need to do a full resolve.
    */
   intel_miptree_all_slices_resolve_hiz(brw, src_mt);
   intel_miptree_all_slices_resolve_depth(brw, src_mt);
   intel_miptree_resolve_color(brw, src_mt, 0);

   intel_miptree_all_slices_resolve_hiz(brw, dst_mt);
   intel_miptree_all_slices_resolve_depth(brw, dst_mt);
   intel_miptree_resolve_color(brw, dst_mt, 0);

   _mesa_get_format_block_size(src_mt->format, &bw, &bh);

   /* It's legal to have a WxH that's smaller than a compressed block. This
    * happens for example when you are using a higher level LOD. For this case,
    * we still want to copy the entire block, or else the decompression will be
    * incorrect.
    */
   if (src_width < bw)
      src_width = ALIGN_NPOT(src_width, bw);

   if (src_height < bh)
      src_height = ALIGN_NPOT(src_height, bh);

   if (copy_image_with_blitter(brw, src_mt, src_level,
                               src_x, src_y, src_z,
                               dst_mt, dst_level,
                               dst_x, dst_y, dst_z,
                               src_width, src_height))
      return;

   /* This is a worst-case scenario software fallback that maps the two
    * textures and does a memcpy between them.
    */
   copy_image_with_memcpy(brw, src_mt, src_level,
                          src_x, src_y, src_z,
                          dst_mt, dst_level,
                          dst_x, dst_y, dst_z,
                          src_width, src_height);
}
/**
 * Allocate and initialize an intel_mipmap_tree structure and compute its
 * layout (i915 vs. i945 path).
 *
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 *
 * \return the new miptree (refcount 1), or NULL on allocation failure.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   /* Cube maps are stored as 6 array layers internally. */
   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   (void) intel;

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}
/**
 * Map texture memory/buffer into user space.
 * Note: the region of interest parameters are ignored here.
 *
 * Handles three storage cases: a TFP BO (image->bo), a miptree
 * (image->mt), or plain malloc'd memory (image->base.Buffer).  If the BO
 * is still referenced by the command stream, pending vertices are flushed
 * first so the map sees up-to-date data.
 *
 * \param map     returns start of mapping of region of interest
 * \param stride  returns row stride in bytes
 */
static void radeon_map_texture_image(struct gl_context *ctx,
				     struct gl_texture_image *texImage,
				     GLuint slice,
				     GLuint x, GLuint y, GLuint w, GLuint h,
				     GLbitfield mode,
				     GLubyte **map,
				     GLint *stride)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeon_texture_image *image = get_radeon_texture_image(texImage);
	radeon_mipmap_tree *mt = image->mt;
	GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
	GLuint width = texImage->Width;
	GLuint height = texImage->Height;
	struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
	unsigned int bw, bh;
	GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;

	/* For compressed formats, work in units of blocks: y becomes a
	 * block-row index and texel_size becomes bytes per block column.
	 */
	_mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
	assert(y % bh == 0);
	y /= bh;
	texel_size /= bw;

	/* Flush if the GPU still has this BO queued, so the CPU map is
	 * coherent with pending rendering.
	 */
	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s for texture that is "
			     "queued for GPU processing.\n",
			     __func__);
		radeon_firevertices(rmesa);
	}

	if (image->bo) {
		/* TFP case */
		radeon_bo_map(image->bo, write);
		*stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
		*map = bo->ptr;
	} else if (likely(mt)) {
		/* Normal miptree-backed case. */
		void *base;
		radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];

		radeon_bo_map(mt->bo, write);
		base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;

		*stride = lvl->rowstride;
		*map = base + (slice * height) * *stride;
	} else {
		/* texture data is in malloc'd memory */

		assert(map);

		*stride = _mesa_format_row_stride(texImage->TexFormat, width);
		*map = image->base.Buffer + (slice * height) * *stride;
	}

	/* Offset the map pointer to the requested (x, y) block position. */
	*map += y * *stride + x * texel_size;
}
/**
 * Return the number of bytes per (1 x blockheight) column of a compressed
 * format: the block size in bytes divided by the block width in pixels.
 */
int intel_compressed_num_bytes(GLuint mesaFormat)
{
   GLuint bw, bh;

   _mesa_get_format_block_size(mesaFormat, &bw, &bh);
   return _mesa_get_format_bytes(mesaFormat) / bw;
}
/**
 * Return the horizontal alignment unit ("i") in pixels for a surface of
 * the given format on this hardware generation.
 */
static unsigned int
intel_horizontal_texture_alignment_unit(struct brw_context *brw,
                                        gl_format format)
{
   /**
    * From the "Alignment Unit Size" section of various specs, namely:
    * - Gen3 Spec: "Memory Data Formats" Volume,         Section 1.20.1.4
    * - i965 and G45 PRMs:             Volume 1,         Section 6.17.3.4.
    * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
    * - BSpec (for Ivybridge and slight variations in separate stencil)
    *
    * +----------------------------------------------------------------------+
    * |                                        | alignment unit width  ("i") |
    * | Surface Property                       |-----------------------------|
    * |                                        | 915 | 965 | ILK | SNB | IVB |
    * +----------------------------------------------------------------------+
    * | YUV 4:2:2 format                       |  8  |  4  |  4  |  4  |  4  |
    * | BC1-5 compressed format (DXTn/S3TC)    |  4  |  4  |  4  |  4  |  4  |
    * | FXT1  compressed format                |  8  |  8  |  8  |  8  |  8  |
    * | Depth Buffer (16-bit)                  |  4  |  4  |  4  |  4  |  8  |
    * | Depth Buffer (other)                   |  4  |  4  |  4  |  4  |  4  |
    * | Separate Stencil Buffer                | N/A | N/A |  8  |  8  |  8  |
    * | All Others                             |  4  |  4  |  4  |  4  |  4  |
    * +----------------------------------------------------------------------+
    *
    * On IVB+, non-special cases can be overridden by setting the SURFACE_STATE
    * "Surface Horizontal Alignment" field to HALIGN_4 or HALIGN_8.
    */
   if (_mesa_is_format_compressed(format)) {
      /* The hardware alignment requirements for compressed textures
       * happen to match the block boundaries.
       */
      unsigned int i, j;
      _mesa_get_format_block_size(format, &i, &j);
      return i;
   }

   if (format == MESA_FORMAT_S8)
      return 8;

   /* The depth alignment requirements in the table above are for rendering to
    * depth miplevels using the LOD control fields.  We don't use LOD control
    * fields, and instead use page offsets plus intra-tile x/y offsets, which
    * require that the low 3 bits are zero.  To reduce the number of x/y
    * offset workaround blits we do, align the X to 8, which depth texturing
    * can handle (sadly, it can't handle 8 in the Y direction).
    */
   if (brw->gen >= 7 &&
       _mesa_get_format_base_format(format) == GL_DEPTH_COMPONENT)
      return 8;

   return 4;
}
/**
 * Compute compressed_pixelstore parameters for copying compressed
 * texture data.
 * \param dims  number of texture image dimensions: 1, 2 or 3
 * \param texFormat  the compressed texture format
 * \param width, height, depth  size of image to copy
 * \param packing  pixelstore parameters describing user-space image packing
 * \param store  returns the compressed_pixelstore parameters
 *
 * Starts from defaults derived from the format's own block size, then
 * overrides row/slice geometry and skip offsets using the application's
 * GL_UNPACK/PACK_COMPRESSED_BLOCK_* pixelstore state, when set.
 */
void
_mesa_compute_compressed_pixelstore(GLuint dims, mesa_format texFormat,
                                    GLsizei width, GLsizei height,
                                    GLsizei depth,
                                    const struct gl_pixelstore_attrib *packing,
                                    struct compressed_pixelstore *store)
{
   GLuint bw, bh;

   _mesa_get_format_block_size(texFormat, &bw, &bh);

   /* Defaults: tightly packed rows of blocks at the format's natural size. */
   store->SkipBytes = 0;
   store->TotalBytesPerRow = store->CopyBytesPerRow =
         _mesa_format_row_stride(texFormat, width);
   store->TotalRowsPerSlice = store->CopyRowsPerSlice =
         (height + bh - 1) / bh;
   store->CopySlices = depth;

   if (packing->CompressedBlockWidth &&
       packing->CompressedBlockSize) {

      bw = packing->CompressedBlockWidth;

      if (packing->RowLength) {
         store->TotalBytesPerRow = packing->CompressedBlockSize *
            ((packing->RowLength + bw - 1) / bw);
      }

      /* SkipPixels is expressed in pixels; convert to bytes of blocks. */
      store->SkipBytes +=
         packing->SkipPixels * packing->CompressedBlockSize / bw;
   }

   if (dims > 1 &&
       packing->CompressedBlockHeight &&
       packing->CompressedBlockSize) {

      bh = packing->CompressedBlockHeight;

      store->SkipBytes += packing->SkipRows * store->TotalBytesPerRow / bh;
      store->CopyRowsPerSlice = (height + bh - 1) / bh;  /* rows in blocks */

      if (packing->ImageHeight) {
         store->TotalRowsPerSlice = (packing->ImageHeight + bh - 1) / bh;
      }
   }

   if (dims > 2 &&
       packing->CompressedBlockDepth &&
       packing->CompressedBlockSize) {

      int bd = packing->CompressedBlockDepth;

      store->SkipBytes += packing->SkipImages * store->TotalBytesPerRow *
            store->TotalRowsPerSlice / bd;
   }
}
/**
 * This is the software fallback for Driver.GetCompressedTexImage().
 * All error checking will have been done before this routine is called.
 *
 * Copies the compressed image out of texImage->Data, either in one
 * _mesa_memcpy (when the stored row stride equals the tight stride for
 * the image width) or one row of blocks at a time.
 *
 * Fix: the GL_OUT_OF_MEMORY error string previously said
 * "glGetCompresssedTexImage" (typo, triple 's').
 */
void
_mesa_get_compressed_teximage(GLcontext *ctx, GLenum target, GLint level,
                              GLvoid *img,
                              struct gl_texture_object *texObj,
                              struct gl_texture_image *texImage)
{
   const GLuint row_stride =
      _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
   const GLuint row_stride_stored =
      _mesa_format_row_stride(texImage->TexFormat, texImage->RowStride);
   GLuint i;

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      /* pack texture image into a PBO */
      GLubyte *buf = (GLubyte *)
         ctx->Driver.MapBuffer(ctx, GL_PIXEL_PACK_BUFFER_EXT,
                               GL_WRITE_ONLY_ARB, ctx->Pack.BufferObj);
      if (!buf) {
         /* out of memory or other unexpected error */
         _mesa_error(ctx, GL_OUT_OF_MEMORY,
                     "glGetCompressedTexImage(map PBO failed)");
         return;
      }
      img = ADD_POINTERS(buf, img);
   }

   /* no pixelstore or pixel transfer, but respect stride */

   if (row_stride == row_stride_stored) {
      /* Contiguous: one copy of the whole image. */
      const GLuint size = _mesa_format_image_size(texImage->TexFormat,
                                                  texImage->Width,
                                                  texImage->Height,
                                                  texImage->Depth);
      _mesa_memcpy(img, texImage->Data, size);
   }
   else {
      /* Copy one row of compressed blocks at a time. */
      GLuint bw, bh;
      _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
      for (i = 0; i < (texImage->Height + bh - 1) / bh; i++) {
         memcpy((GLubyte *)img + i * row_stride,
                (GLubyte *)texImage->Data + i * row_stride_stored,
                row_stride);
      }
   }

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
      ctx->Driver.UnmapBuffer(ctx, GL_PIXEL_PACK_BUFFER_EXT,
                              ctx->Pack.BufferObj);
   }
}
/**
 * Return the horizontal alignment unit ("i") in pixels for this miptree's
 * format on this hardware generation.
 */
static unsigned int
intel_horizontal_texture_alignment_unit(struct brw_context *brw,
                                        struct intel_mipmap_tree *mt)
{
   /**
    * From the "Alignment Unit Size" section of various specs, namely:
    * - Gen3 Spec: "Memory Data Formats" Volume,         Section 1.20.1.4
    * - i965 and G45 PRMs:             Volume 1,         Section 6.17.3.4.
    * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
    * - BSpec (for Ivybridge and slight variations in separate stencil)
    *
    * +----------------------------------------------------------------------+
    * |                                        | alignment unit width  ("i") |
    * | Surface Property                       |-----------------------------|
    * |                                        | 915 | 965 | ILK | SNB | IVB |
    * +----------------------------------------------------------------------+
    * | YUV 4:2:2 format                       |  8  |  4  |  4  |  4  |  4  |
    * | BC1-5 compressed format (DXTn/S3TC)    |  4  |  4  |  4  |  4  |  4  |
    * | FXT1  compressed format                |  8  |  8  |  8  |  8  |  8  |
    * | Depth Buffer (16-bit)                  |  4  |  4  |  4  |  4  |  8  |
    * | Depth Buffer (other)                   |  4  |  4  |  4  |  4  |  4  |
    * | Separate Stencil Buffer                | N/A | N/A |  8  |  8  |  8  |
    * | All Others                             |  4  |  4  |  4  |  4  |  4  |
    * +----------------------------------------------------------------------+
    *
    * On IVB+, non-special cases can be overridden by setting the SURFACE_STATE
    * "Surface Horizontal Alignment" field to HALIGN_4 or HALIGN_8.
    */
   if (_mesa_is_format_compressed(mt->format)) {
      /* The hardware alignment requirements for compressed textures
       * happen to match the block boundaries.
       */
      unsigned int i, j;
      _mesa_get_format_block_size(mt->format, &i, &j);
      return i;
   }

   if (mt->format == MESA_FORMAT_S_UINT8)
      return 8;

   if (brw->gen >= 7 && mt->format == MESA_FORMAT_Z_UNORM16)
      return 8;

   /* Gen8 single-sampled surfaces with an MCS (fast-clear aux) need
    * HALIGN_16 — NOTE(review): inferred from the mcs_mt/num_samples check;
    * confirm against the BDW PRM.
    */
   if (brw->gen == 8 && mt->mcs_mt && mt->num_samples <= 1)
      return 16;

   return 4;
}
/**
 * All compressed texture texel fetching is done though this function.
 * Basically just call a core-Mesa texel fetch function.
 *
 * Converts the image's byte-per-block-row stride into the texel row
 * stride that FetchCompressedTexel expects.
 */
static void
fetch_compressed(const struct swrast_texture_image *swImage,
                 GLint i, GLint j, GLint k, GLfloat *texel)
{
   GLuint bw, bh;
   GLuint texelBytes = _mesa_get_format_bytes(swImage->Base.TexFormat);
   GLint texelRowStride;

   _mesa_get_format_block_size(swImage->Base.TexFormat, &bw, &bh);

   /* bytes-per-block-row must convert cleanly to texels */
   assert(swImage->RowStride * bw % texelBytes == 0);
   texelRowStride = swImage->RowStride * bw / texelBytes;

   swImage->FetchCompressedTexel(swImage->ImageSlices[k],
                                 texelRowStride,
                                 i, j, texel);
}
/* * Return the address of the pixel at (col, row, img) in a * compressed texture image. * \param col, row, img - image position (3D), should be a multiple of the * format's block size. * \param format - compressed image format * \param width - image width (stride) in pixels * \param image - the image address * \return address of pixel at (row, col, img) */ GLubyte * _mesa_compressed_image_address(GLint col, GLint row, GLint img, gl_format mesaFormat, GLsizei width, const GLubyte *image) { /* XXX only 2D images implemented, not 3D */ const GLuint blockSize = _mesa_get_format_bytes(mesaFormat); GLuint bw, bh; GLint offset; _mesa_get_format_block_size(mesaFormat, &bw, &bh); ASSERT(col % bw == 0); ASSERT(row % bh == 0); offset = ((width + bw - 1) / bw) * (row / bh) + col / bw; offset *= blockSize; return (GLubyte *) image + offset; }
/**
 * Map a miptree level/slice through the GTT and fill in map->ptr and
 * map->stride for the requested (map->x, map->y) origin.
 *
 * On mapping failure, map->ptr is set to NULL.
 */
static void
intel_miptree_map_gtt(struct intel_context *intel,
		      struct intel_mipmap_tree *mt,
		      struct intel_miptree_map *map,
		      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
      */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
/**
 * Lay out a 3D (or cube) texture: for each level, the depth slices are
 * arranged in a grid that is 2^level slices wide, and successive levels
 * are stacked vertically (ysum).  All offsets and totals are stored in
 * block units (divided by bw/bh).
 */
static void
brw_miptree_layout_texture_3d(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
{
   mt->total_width = 0;
   mt->total_height = 0;

   unsigned ysum = 0;
   unsigned bh, bw;

   _mesa_get_format_block_size(mt->format, &bw, &bh);

   for (unsigned level = mt->first_level; level <= mt->last_level; level++) {
      unsigned WL = MAX2(mt->physical_width0 >> level, 1);
      unsigned HL = MAX2(mt->physical_height0 >> level, 1);
      unsigned DL = MAX2(mt->physical_depth0 >> level, 1);
      /* level dimensions rounded up to the alignment units */
      unsigned wL = ALIGN_NPOT(WL, mt->align_w);
      unsigned hL = ALIGN_NPOT(HL, mt->align_h);

      if (mt->target == GL_TEXTURE_CUBE_MAP)
         DL = 6;

      intel_miptree_set_level_info(mt, level, 0, 0, DL);

      for (unsigned q = 0; q < DL; q++) {
         /* 2^level slices per grid row at this level */
         unsigned x = (q % (1 << level)) * wL;
         unsigned y = ysum + (q >> level) * hL;

         intel_miptree_set_image_offset(mt, level, q, x / bw, y / bh);
         mt->total_width = MAX2(mt->total_width, (x + wL) / bw);
         mt->total_height = MAX2(mt->total_height, (y + hL) / bh);
      }

      /* advance past the rows of slices this level occupied */
      ysum += ALIGN(DL, 1 << level) / (1 << level) * hL;
   }

   align_cube(mt);
}
/** * Map a 2D slice of a texture image into user space. * (x,y,w,h) defines a region of interest (ROI). Reading/writing texels * outside of the ROI is undefined. * * \param texImage the texture image * \param slice the 3D image slice or array texture slice * \param x, y, w, h region of interest * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT * \param mapOut returns start of mapping of region of interest * \param rowStrideOut returns row stride (in bytes) */ void _swrast_map_teximage(struct gl_context *ctx, struct gl_texture_image *texImage, GLuint slice, GLuint x, GLuint y, GLuint w, GLuint h, GLbitfield mode, GLubyte **mapOut, GLint *rowStrideOut) { struct swrast_texture_image *swImage = swrast_texture_image(texImage); GLubyte *map; GLint stride, texelSize; GLuint bw, bh; _mesa_check_map_teximage(texImage, slice, x, y, w, h); texelSize = _mesa_get_format_bytes(texImage->TexFormat); stride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh); assert(x % bw == 0); assert(y % bh == 0); if (!swImage->Buffer) { /* probably ran out of memory when allocating tex mem */ *mapOut = NULL; return; } map = swImage->Buffer; /* apply x/y offset to map address */ map += stride * (y / bh) + texelSize * (x / bw); *mapOut = map; *rowStrideOut = stride; }
/**
 * Try to copy a region between two miptrees with the blitter.
 *
 * \return false when the blitter cannot be used (multisampled surfaces,
 *         stencil format, or pitch >= 32k) so the caller can fall back;
 *         otherwise the result of intelEmitCopyBlit().
 */
static bool
copy_image_with_blitter(struct brw_context *brw,
                        struct intel_mipmap_tree *src_mt, int src_level,
                        int src_x, int src_y, int src_z,
                        struct intel_mipmap_tree *dst_mt, int dst_level,
                        int dst_x, int dst_y, int dst_z,
                        int src_width, int src_height)
{
   GLuint bw, bh;
   uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;

   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->num_samples > 0 || dst_mt->num_samples > 0)
      return false;

   if (src_mt->format == MESA_FORMAT_S_UINT8)
      return false;

   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data. Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
    *    at the destination. The maximum number of pixels that may be
    *    represented per scan line’s worth of graphics data depends on the
    *    color depth.
    *
    * Furthermore, intelEmitCopyBlit (which is called below) uses a signed
    * 16-bit integer to represent buffer pitch, so it can only handle buffer
    * pitches < 32k.
    *
    * As a result of these two limitations, we can only use the blitter to do
    * this copy when the miptree's pitch is less than 32k.
    */
   if (src_mt->pitch >= 32768 ||
       dst_mt->pitch >= 32768) {
      perf_debug("Falling back due to >=32k pitch\n");
      return false;
   }

   intel_miptree_get_image_offset(src_mt, src_level, src_z,
                                  &src_image_x, &src_image_y);

   /* Convert compressed-source coordinates and extent to block units. */
   if (_mesa_is_format_compressed(src_mt->format)) {
      _mesa_get_format_block_size(src_mt->format, &bw, &bh);

      assert(src_x % bw == 0);
      assert(src_y % bh == 0);
      assert(src_width % bw == 0);
      assert(src_height % bh == 0);

      src_x /= (int)bw;
      src_y /= (int)bh;
      src_width /= (int)bw;
      src_height /= (int)bh;
   }
   src_x += src_image_x;
   src_y += src_image_y;

   intel_miptree_get_image_offset(dst_mt, dst_level, dst_z,
                                  &dst_image_x, &dst_image_y);

   /* Convert compressed-destination coordinates to block units. */
   if (_mesa_is_format_compressed(dst_mt->format)) {
      _mesa_get_format_block_size(dst_mt->format, &bw, &bh);

      assert(dst_x % bw == 0);
      assert(dst_y % bh == 0);

      dst_x /= (int)bw;
      dst_y /= (int)bh;
   }
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   return intelEmitCopyBlit(brw,
                            src_mt->cpp,
                            src_mt->pitch,
                            src_mt->bo, src_mt->offset,
                            src_mt->tiling,
                            src_mt->tr_mode,
                            dst_mt->pitch,
                            dst_mt->bo, dst_mt->offset,
                            dst_mt->tiling,
                            dst_mt->tr_mode,
                            src_x, src_y,
                            dst_x, dst_y,
                            src_width, src_height,
                            GL_COPY);
}
/**
 * Copy a region between two miptrees with the blitter.
 *
 * Resolves both miptrees first (the blitter has no idea about HiZ or fast
 * color clears), converts compressed coordinates/extents to block units,
 * then emits the blit.
 *
 * \return false when the blitter cannot be used (multisampled surfaces or
 *         stencil format); otherwise the result of emit_miptree_blit().
 */
bool
intel_miptree_copy(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y,
                   uint32_t src_width, uint32_t src_height)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
      return false;

   if (src_mt->format == MESA_FORMAT_S_UINT8)
      return false;

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   uint32_t src_image_x, src_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);

   if (_mesa_is_format_compressed(src_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(src_mt->format, &bw, &bh);

      /* Compressed textures need not have dimensions that are a multiple of
       * the block size.  Rectangles in compressed textures do need to be a
       * multiple of the block size.  The one exception is that the right and
       * bottom edges may be at the right or bottom edge of the miplevel even
       * if it's not aligned.
       */
      assert(src_x % bw == 0);
      assert(src_y % bh == 0);

      assert(src_width % bw == 0 ||
             src_x + src_width ==
             minify(src_mt->surf.logical_level0_px.width, src_level));
      assert(src_height % bh == 0 ||
             src_y + src_height ==
             minify(src_mt->surf.logical_level0_px.height, src_level));

      /* Convert to block units; extents round up to cover the edge blocks. */
      src_x /= (int)bw;
      src_y /= (int)bh;
      src_width = DIV_ROUND_UP(src_width, (int)bw);
      src_height = DIV_ROUND_UP(src_height, (int)bh);
   }
   src_x += src_image_x;
   src_y += src_image_y;

   uint32_t dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);

   if (_mesa_is_format_compressed(dst_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(dst_mt->format, &bw, &bh);

      assert(dst_x % bw == 0);
      assert(dst_y % bh == 0);

      dst_x /= (int)bw;
      dst_y /= (int)bh;
   }
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   return emit_miptree_blit(brw, src_mt, src_x, src_y,
                            dst_mt, dst_x, dst_y,
                            src_width, src_height, false, COLOR_LOGICOP_COPY);
}
/**
 * glCopyImageSubData entry point (ARB_copy_image).
 *
 * Validates both targets, block alignment of the source/dest rectangles,
 * texel-size compatibility, and region bounds, then dispatches one
 * driver CopyImageSubData call per depth slice (cube maps iterate faces).
 *
 * Fixes:
 *  - The API-trace _mesa_debug call passed srcWidth as the final argument
 *    where srcDepth belongs.
 *  - The format-compatibility failure path used `return;`, leaking the
 *    temporary texture names in tmpTexNames; it now goes through cleanup.
 */
void GLAPIENTRY
_mesa_CopyImageSubData(GLuint srcName, GLenum srcTarget, GLint srcLevel,
                       GLint srcX, GLint srcY, GLint srcZ,
                       GLuint dstName, GLenum dstTarget, GLint dstLevel,
                       GLint dstX, GLint dstY, GLint dstZ,
                       GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth)
{
   GET_CURRENT_CONTEXT(ctx);
   GLuint tmpTexNames[2] = { 0, 0 };
   struct gl_texture_object *srcTexObj, *dstTexObj;
   struct gl_texture_image *srcTexImage, *dstTexImage;
   GLuint src_bw, src_bh, dst_bw, dst_bh;
   int i, srcNewZ, dstNewZ, Bpt;

   if (MESA_VERBOSE & VERBOSE_API)
      _mesa_debug(ctx, "glCopyImageSubData(%u, %s, %d, %d, %d, %d, "
                                          "%u, %s, %d, %d, %d, %d, "
                                          "%d, %d, %d)\n",
                  srcName, _mesa_lookup_enum_by_nr(srcTarget), srcLevel,
                  srcX, srcY, srcZ,
                  dstName, _mesa_lookup_enum_by_nr(dstTarget), dstLevel,
                  dstX, dstY, dstZ,
                  srcWidth, srcHeight, srcDepth);

   if (!ctx->Extensions.ARB_copy_image) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(extension not available)");
      return;
   }

   if (!prepare_target(ctx, srcName, &srcTarget, srcLevel,
                       &srcTexObj, &srcTexImage, &tmpTexNames[0], "src"))
      goto cleanup;

   if (!prepare_target(ctx, dstName, &dstTarget, dstLevel,
                       &dstTexObj, &dstTexImage, &tmpTexNames[1], "dst"))
      goto cleanup;

   /* The source rectangle must be aligned to the source's block size. */
   _mesa_get_format_block_size(srcTexImage->TexFormat, &src_bw, &src_bh);
   if ((srcX % src_bw != 0) || (srcY % src_bh != 0) ||
       (srcWidth % src_bw != 0) || (srcHeight % src_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned src rectangle)");
      goto cleanup;
   }

   /* Only the destination offsets need block alignment; the copied extent
    * is derived from the source extent below.
    */
   _mesa_get_format_block_size(dstTexImage->TexFormat, &dst_bw, &dst_bh);
   if ((dstX % dst_bw != 0) || (dstY % dst_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned dst rectangle)");
      goto cleanup;
   }

   /* Very simple sanity check.  This is sufficient if one of the textures
    * is compressed.
    */
   Bpt = _mesa_get_format_bytes(srcTexImage->TexFormat);
   if (_mesa_get_format_bytes(dstTexImage->TexFormat) != Bpt) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(internalFormat mismatch)");
      goto cleanup;
   }

   if (!check_region_bounds(ctx, srcTexImage, srcX, srcY, srcZ,
                            srcWidth, srcHeight, srcDepth, "src"))
      goto cleanup;

   /* The destination extent is the source extent converted between the two
    * formats' block sizes.
    */
   if (!check_region_bounds(ctx, dstTexImage, dstX, dstY, dstZ,
                            (srcWidth / src_bw) * dst_bw,
                            (srcHeight / src_bh) * dst_bh, srcDepth, "dst"))
      goto cleanup;

   if (_mesa_is_format_compressed(srcTexImage->TexFormat)) {
      /* XXX: Technically, we should probaby do some more specific checking
       * here.  However, this should be sufficient for all compressed
       * formats that mesa supports since it is a direct memory copy.
       */
   }
   else if (_mesa_is_format_compressed(dstTexImage->TexFormat)) {
   }
   else if (_mesa_texture_view_compatible_format(ctx,
                                                 srcTexImage->InternalFormat,
                                                 dstTexImage->InternalFormat)) {
   }
   else {
      /* Error logged by _mesa_texture_view_compatible_format.
       * Go through cleanup so any temporary texture names are freed.
       */
      goto cleanup;
   }

   for (i = 0; i < srcDepth; ++i) {
      /* For cube maps, each face is a separate gl_texture_image. */
      if (srcTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         srcTexImage = srcTexObj->Image[i + srcZ][srcLevel];
         srcNewZ = 0;
      } else {
         srcNewZ = srcZ + i;
      }

      if (dstTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         dstTexImage = dstTexObj->Image[i + dstZ][dstLevel];
         dstNewZ = 0;
      } else {
         dstNewZ = dstZ + i;
      }

      ctx->Driver.CopyImageSubData(ctx, srcTexImage, srcX, srcY, srcNewZ,
                                   dstTexImage, dstX, dstY, dstNewZ,
                                   srcWidth, srcHeight);
   }

cleanup:
   _mesa_DeleteTextures(2, tmpTexNames);
}
/** * Update a subregion of the given texture image. */ static void radeon_store_teximage(GLcontext* ctx, int dims, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei imageSize, GLenum format, GLenum type, const GLvoid * pixels, const struct gl_pixelstore_attrib *packing, struct gl_texture_object *texObj, struct gl_texture_image *texImage, int compressed) { radeonContextPtr rmesa = RADEON_CONTEXT(ctx); radeonTexObj *t = radeon_tex_obj(texObj); radeon_texture_image* image = get_radeon_texture_image(texImage); GLuint dstRowStride; GLuint *dstImageOffsets; radeon_print(RADEON_TEXTURE, RADEON_TRACE, "%s(%p, tex %p, image %p) compressed %d\n", __func__, ctx, texObj, texImage, compressed); if (image->mt) { dstRowStride = image->mt->levels[image->mtlevel].rowstride; } else if (t->bo) { /* TFP case */ dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0); } else { dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); } assert(dstRowStride); if (dims == 3) { unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat); dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth); if (!dstImageOffsets) { radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__); return; } } else { dstImageOffsets = texImage->ImageOffsets; } radeon_teximage_map(image, GL_TRUE); if (compressed) { uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height; GLubyte *img_start; _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height); if (!image->mt) { dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); img_start = _mesa_compressed_image_address(xoffset, yoffset, 0, texImage->TexFormat, texImage->Width, texImage->Data); } else { uint32_t offset; offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width; offset *= 
_mesa_get_format_bytes(texImage->TexFormat); img_start = texImage->Data + offset; } srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width); bytesPerRow = srcRowStride; rows = (height + block_height - 1) / block_height; copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow); } else { if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat, texImage->TexFormat, texImage->Data, xoffset, yoffset, zoffset, dstRowStride, dstImageOffsets, width, height, depth, format, type, pixels, packing)) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage"); } } if (dims == 3) { free(dstImageOffsets); } radeon_teximage_unmap(image); }
/* Lay out all mip levels of a 2D miptree using the classic "layout below"
 * scheme: level 0 spans the top, level 1 sits below it, and levels 2..N
 * stack to the right of level 1.  Computes mt->total_width/total_height and
 * records each level's (x, y) placement.
 *
 * For compressed formats, widths/heights are converted to block units
 * (divided by the block dimensions bw/bh) so totals are in blocks, not
 * texels.
 */
static void
brw_miptree_layout_2d(struct intel_mipmap_tree *mt)
{
   unsigned x = 0;               /* placement cursor, in block units horizontally */
   unsigned y = 0;
   unsigned width = mt->physical_width0;
   unsigned height = mt->physical_height0;
   unsigned depth = mt->physical_depth0; /* number of array layers. */
   unsigned int bw, bh;          /* compression block width/height (1x1 if uncompressed) */

   _mesa_get_format_block_size(mt->format, &bw, &bh);

   mt->total_width = mt->physical_width0;

   if (mt->compressed)
       mt->total_width = ALIGN_NPOT(mt->total_width, bw);

   /* May need to adjust width to accommodate the placement of
    * the 2nd mipmap. This occurs when the alignment
    * constraints of mipmap placement push the right edge of the
    * 2nd mipmap out past the width of its parent.
    */
   if (mt->first_level != mt->last_level) {
       unsigned mip1_width;

       if (mt->compressed) {
          mip1_width = ALIGN_NPOT(minify(mt->physical_width0, 1), mt->align_w) +
             ALIGN_NPOT(minify(mt->physical_width0, 2), bw);
       } else {
          mip1_width = ALIGN_NPOT(minify(mt->physical_width0, 1), mt->align_w) +
             minify(mt->physical_width0, 2);
       }

       if (mip1_width > mt->total_width) {
           mt->total_width = mip1_width;
       }
   }

   /* Convert the texel width into block units. */
   mt->total_width /= bw;

   mt->total_height = 0;

   for (unsigned level = mt->first_level; level <= mt->last_level; level++) {
      unsigned img_height;

      intel_miptree_set_level_info(mt, level, x, y, depth);

      img_height = ALIGN_NPOT(height, mt->align_h);
      if (mt->compressed)
         img_height /= bh;

      if (mt->array_layout == ALL_SLICES_AT_EACH_LOD) {
         /* Compact arrays with separated miplevels */
         img_height *= depth;
      }

      /* Because the images are packed better, the final offset
       * might not be the maximal one:
       */
      mt->total_height = MAX2(mt->total_height, y + img_height);

      /* Layout_below: step right after second mipmap.
       */
      if (level == mt->first_level + 1) {
         x += ALIGN_NPOT(width, mt->align_w) / bw;
      } else {
         y += img_height;
      }

      width = minify(width, 1);
      height = minify(height, 1);
      /* Only true 3D textures shrink in depth per level; array layer
       * counts stay constant across levels. */
      if (mt->target == GL_TEXTURE_3D)
         depth = minify(depth, 1);
   }
}
/**
 * glCopyImageSubData entry point (ARB_copy_image).
 *
 * Validates targets, compressed-block alignment of the source and
 * destination rectangles, and region bounds, then copies slice by slice
 * through the driver hook.  Cube maps iterate over faces via the Z
 * coordinate.
 */
void GLAPIENTRY
_mesa_CopyImageSubData(GLuint srcName, GLenum srcTarget, GLint srcLevel,
                       GLint srcX, GLint srcY, GLint srcZ,
                       GLuint dstName, GLenum dstTarget, GLint dstLevel,
                       GLint dstX, GLint dstY, GLint dstZ,
                       GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth)
{
   GET_CURRENT_CONTEXT(ctx);
   GLuint tmpTexNames[2] = { 0, 0 };
   struct gl_texture_object *srcTexObj, *dstTexObj;
   struct gl_texture_image *srcTexImage, *dstTexImage;
   GLuint src_bw, src_bh, dst_bw, dst_bh;
   int i;

   if (MESA_VERBOSE & VERBOSE_API)
      /* BUGFIX: the last argument used to be srcWidth, so the trace printed
       * the width where the depth belongs.  Pass srcDepth. */
      _mesa_debug(ctx, "glCopyImageSubData(%u, %s, %d, %d, %d, %d, "
                       "%u, %s, %d, %d, %d, %d, "
                       "%d, %d, %d)\n",
                  srcName, _mesa_lookup_enum_by_nr(srcTarget), srcLevel,
                  srcX, srcY, srcZ,
                  dstName, _mesa_lookup_enum_by_nr(dstTarget), dstLevel,
                  dstX, dstY, dstZ,
                  srcWidth, srcHeight, srcDepth);

   if (!ctx->Extensions.ARB_copy_image) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(extension not available)");
      return;
   }

   if (!prepare_target(ctx, srcName, &srcTarget, srcLevel,
                       &srcTexObj, &srcTexImage, &tmpTexNames[0], "src"))
      goto cleanup;

   if (!prepare_target(ctx, dstName, &dstTarget, dstLevel,
                       &dstTexObj, &dstTexImage, &tmpTexNames[1], "dst"))
      goto cleanup;

   /* Compressed formats require the rectangle to be block-aligned. */
   _mesa_get_format_block_size(srcTexImage->TexFormat, &src_bw, &src_bh);
   if ((srcX % src_bw != 0) || (srcY % src_bh != 0) ||
       (srcWidth % src_bw != 0) || (srcHeight % src_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned src rectangle)");
      goto cleanup;
   }

   _mesa_get_format_block_size(dstTexImage->TexFormat, &dst_bw, &dst_bh);
   if ((dstX % dst_bw != 0) || (dstY % dst_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned dst rectangle)");
      goto cleanup;
   }

   if (!check_region_bounds(ctx, srcTexImage, srcX, srcY, srcZ,
                            srcWidth, srcHeight, srcDepth, "src"))
      goto cleanup;

   /* Destination extent is the source extent rescaled by the ratio of the
    * two block sizes (spec: dimensions are in texels even for compressed
    * formats). */
   if (!check_region_bounds(ctx, dstTexImage, dstX, dstY, dstZ,
                            (srcWidth / src_bw) * dst_bw,
                            (srcHeight / src_bh) * dst_bh,
                            srcDepth, "dst"))
      goto cleanup;

   if (!copy_format_compatible(ctx, srcTexImage->InternalFormat,
                               dstTexImage->InternalFormat)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(internalFormat mismatch)");
      goto cleanup;
   }

   for (i = 0; i < srcDepth; ++i) {
      int srcNewZ, dstNewZ;

      /* Cube maps store each face as a separate image; select the face via
       * Z and copy within face slice 0. */
      if (srcTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         srcTexImage = srcTexObj->Image[i + srcZ][srcLevel];
         srcNewZ = 0;
      } else {
         srcNewZ = srcZ + i;
      }

      if (dstTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         dstTexImage = dstTexObj->Image[i + dstZ][dstLevel];
         dstNewZ = 0;
      } else {
         dstNewZ = dstZ + i;
      }

      ctx->Driver.CopyImageSubData(ctx, srcTexImage, srcX, srcY, srcNewZ,
                                   dstTexImage, dstX, dstY, dstNewZ,
                                   srcWidth, srcHeight);
   }

cleanup:
   _mesa_DeleteTextures(2, tmpTexNames);
}
/** * Decompress a compressed texture image, returning a GL_RGBA/GL_FLOAT image. * \param srcRowStride stride in bytes between rows of blocks in the * compressed source image. */ void _mesa_decompress_image(gl_format format, GLuint width, GLuint height, const GLubyte *src, GLint srcRowStride, GLfloat *dest) { void (*fetch)(const struct swrast_texture_image *texImage, GLint i, GLint j, GLint k, GLfloat *texel); struct swrast_texture_image texImage; /* dummy teximage */ GLuint i, j; GLuint bytes, bw, bh; bytes = _mesa_get_format_bytes(format); _mesa_get_format_block_size(format, &bw, &bh); /* setup dummy texture image info */ memset(&texImage, 0, sizeof(texImage)); texImage.Map = (void *) src; /* XXX This line is a bit of a hack to adapt to the row stride * convention used by the texture decompression functions. */ texImage.RowStride = srcRowStride * bh / bytes; switch (format) { /* DXT formats */ case MESA_FORMAT_RGB_DXT1: fetch = _mesa_fetch_texel_rgb_dxt1; break; case MESA_FORMAT_RGBA_DXT1: fetch = _mesa_fetch_texel_rgba_dxt1; break; case MESA_FORMAT_RGBA_DXT3: fetch = _mesa_fetch_texel_rgba_dxt3; break; case MESA_FORMAT_RGBA_DXT5: fetch = _mesa_fetch_texel_rgba_dxt5; break; /* FXT1 formats */ case MESA_FORMAT_RGB_FXT1: fetch = _mesa_fetch_texel_2d_f_rgb_fxt1; break; case MESA_FORMAT_RGBA_FXT1: fetch = _mesa_fetch_texel_2d_f_rgba_fxt1; break; /* Red/RG formats */ case MESA_FORMAT_RED_RGTC1: fetch = _mesa_fetch_texel_red_rgtc1; break; case MESA_FORMAT_SIGNED_RED_RGTC1: fetch = _mesa_fetch_texel_signed_red_rgtc1; break; case MESA_FORMAT_RG_RGTC2: fetch = _mesa_fetch_texel_rg_rgtc2; break; case MESA_FORMAT_SIGNED_RG_RGTC2: fetch = _mesa_fetch_texel_signed_rg_rgtc2; break; /* L/LA formats */ case MESA_FORMAT_L_LATC1: fetch = _mesa_fetch_texel_l_latc1; break; case MESA_FORMAT_SIGNED_L_LATC1: fetch = _mesa_fetch_texel_signed_l_latc1; break; case MESA_FORMAT_LA_LATC2: fetch = _mesa_fetch_texel_la_latc2; break; case MESA_FORMAT_SIGNED_LA_LATC2: fetch = 
_mesa_fetch_texel_signed_la_latc2; break; /* ETC1 formats */ case MESA_FORMAT_ETC1_RGB8: fetch = _mesa_fetch_texel_2d_f_etc1_rgb8; break; default: _mesa_problem(NULL, "Unexpected format in _mesa_decompress_image()"); return; } for (j = 0; j < height; j++) { for (i = 0; i < width; i++) { fetch(&texImage, i, j, 0, dest); dest += 4; } } }
/* Choose the horizontal/vertical alignment units (mt->align_w / mt->align_h)
 * for a miptree, per hardware generation and format class.
 */
static void
intel_miptree_set_alignment(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t layout_flags)
{
   /**
    * From the "Alignment Unit Size" section of various specs, namely:
    * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
    * - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
    * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
    * - BSpec (for Ivybridge and slight variations in separate stencil)
    */
   bool gen6_hiz_or_stencil = false;

   if (brw->gen == 6 && mt->array_layout == ALL_SLICES_AT_EACH_LOD) {
      const GLenum base_format = _mesa_get_format_base_format(mt->format);
      gen6_hiz_or_stencil = _mesa_is_depth_or_stencil_format(base_format);
   }

   if (gen6_hiz_or_stencil) {
      /* On gen6, we use ALL_SLICES_AT_EACH_LOD for stencil/hiz because the
       * hardware doesn't support multiple mip levels on stencil/hiz.
       *
       * PRM Vol 2, Part 1, 7.5.3 Hierarchical Depth Buffer:
       * "The hierarchical depth buffer does not support the LOD field"
       *
       * PRM Vol 2, Part 1, 7.5.4.1 Separate Stencil Buffer:
       * "The stencil depth buffer does not support the LOD field"
       */
      if (mt->format == MESA_FORMAT_S_UINT8) {
         /* Stencil uses W tiling, so we force W tiling alignment for the
          * ALL_SLICES_AT_EACH_LOD miptree layout.
          */
         mt->align_w = 64;
         mt->align_h = 64;
         assert((layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
      } else {
         /* Depth uses Y tiling, so we need Y tiling alignment for the
          * ALL_SLICES_AT_EACH_LOD miptree layout.
          */
         mt->align_w = 128 / mt->cpp;
         mt->align_h = 32;
      }
   } else if (mt->compressed) {
      /* The hardware alignment requirements for compressed textures
       * happen to match the block boundaries.
       */
      _mesa_get_format_block_size(mt->format, &mt->align_w, &mt->align_h);

      /* On Gen9+ we can pick our own alignment for compressed textures but it
       * has to be a multiple of the block size. The minimum alignment we can
       * pick is 4 so we effectively have to align to 4 times the block
       * size
       */
      if (brw->gen >= 9) {
         mt->align_w *= 4;
         mt->align_h *= 4;
      }
   } else if (mt->format == MESA_FORMAT_S_UINT8) {
      mt->align_w = 8;
      /* Gen7+ tightened the vertical alignment for W-tiled stencil. */
      mt->align_h = brw->gen >= 7 ? 8 : 4;
   } else if (brw->gen >= 9 && mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
      /* XY_FAST_COPY_BLT doesn't support horizontal alignment < 32 or
       * vertical alignment < 64. */
      mt->align_w = MAX2(tr_mode_horizontal_texture_alignment(brw, mt), 32);
      mt->align_h = MAX2(tr_mode_vertical_texture_alignment(brw, mt), 64);
   } else {
      /* Fallback: per-gen heuristic helpers pick the alignment units. */
      mt->align_w =
         intel_horizontal_texture_alignment_unit(brw, mt, layout_flags);
      mt->align_h = intel_vertical_texture_alignment_unit(brw, mt);
   }
}
/**
 * glGetInternalformativ entry point (ARB_internalformat_query /
 * ARB_internalformat_query2).
 *
 * Answers are accumulated in a local 16-entry buffer initialized from the
 * caller's data, defaulted to the per-pname "unsupported" response, filled
 * in per pname, and finally copied back clamped to bufSize.
 */
void GLAPIENTRY
_mesa_GetInternalformativ(GLenum target, GLenum internalformat, GLenum pname,
                          GLsizei bufSize, GLint *params)
{
   GLint buffer[16];
   GET_CURRENT_CONTEXT(ctx);

   ASSERT_OUTSIDE_BEGIN_END(ctx);

   /* ARB_internalformat_query is also mandatory for ARB_internalformat_query2 */
   if (!(_mesa_has_ARB_internalformat_query(ctx) ||
         _mesa_is_gles3(ctx))) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glGetInternalformativ");
      return;
   }

   assert(ctx->Driver.QueryInternalFormat != NULL);

   if (!_legal_parameters(ctx, target, internalformat, pname, bufSize, params))
      return;

   /* initialize the contents of the temporary buffer */
   memcpy(buffer, params, MIN2(bufSize, 16) * sizeof(GLint));

   /* Use the 'unsupported' response defined by the spec for every pname
    * as the default answer.
    */
   _set_default_response(pname, buffer);

   if (!_is_target_supported(ctx, target) ||
       !_is_internalformat_supported(ctx, target, internalformat) ||
       !_is_resource_supported(ctx, target, internalformat, pname))
      goto end;

   switch (pname) {
   case GL_SAMPLES:
      /* fall-through */
   case GL_NUM_SAMPLE_COUNTS:
      /* The ARB_internalformat_query2 sets the response as 'unsupported' for
       * SAMPLES and NUM_SAMPLE_COUNTS:
       *
       *     "If <internalformat> is not color-renderable, depth-renderable, or
       *     stencil-renderable (as defined in section 4.4.4), or if <target>
       *     does not support multiple samples (ie other than
       *     TEXTURE_2D_MULTISAMPLE, TEXTURE_2D_MULTISAMPLE_ARRAY,
       *     or RENDERBUFFER)."
       */
      if ((target != GL_RENDERBUFFER &&
           target != GL_TEXTURE_2D_MULTISAMPLE &&
           target != GL_TEXTURE_2D_MULTISAMPLE_ARRAY) ||
          !_is_renderable(ctx, internalformat))
         goto end;

      /* The GL ES 3.0 specification, section 6.1.15 page 236 says:
       *
       *     "Since multisampling is not supported for signed and unsigned
       *     integer internal formats, the value of NUM_SAMPLE_COUNTS will be
       *     zero for such formats.
       *
       * Since OpenGL ES 3.1 adds support for multisampled integer formats, we
       * have to check the version for 30 exactly.
       */
      if (pname == GL_NUM_SAMPLE_COUNTS && ctx->API == API_OPENGLES2 &&
          ctx->Version == 30 &&
          _mesa_is_enum_format_integer(internalformat)) {
         goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_INTERNALFORMAT_SUPPORTED:
      /* Having a supported <internalformat> is implemented as a prerequisite
       * for all the <pnames>. Thus, if we reach this point, the internalformat is
       * supported.
       */
      buffer[0] = GL_TRUE;
      break;

   case GL_INTERNALFORMAT_PREFERRED:
      /* The ARB_internalformat_query2 spec says:
       *
       *     "- INTERNALFORMAT_PREFERRED: The implementation-preferred internal
       *     format for representing resources of the specified <internalformat> is
       *     returned in <params>.
       *
       * Therefore, we let the driver answer. Note that if we reach this
       * point, it means that the internalformat is supported, so the driver
       * is called just to try to get a preferred format. If not supported,
       * GL_NONE was already returned and the driver is not called.
       */
      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_INTERNALFORMAT_RED_SIZE:
   case GL_INTERNALFORMAT_GREEN_SIZE:
   case GL_INTERNALFORMAT_BLUE_SIZE:
   case GL_INTERNALFORMAT_ALPHA_SIZE:
   case GL_INTERNALFORMAT_DEPTH_SIZE:
   case GL_INTERNALFORMAT_STENCIL_SIZE:
   case GL_INTERNALFORMAT_SHARED_SIZE:
   case GL_INTERNALFORMAT_RED_TYPE:
   case GL_INTERNALFORMAT_GREEN_TYPE:
   case GL_INTERNALFORMAT_BLUE_TYPE:
   case GL_INTERNALFORMAT_ALPHA_TYPE:
   case GL_INTERNALFORMAT_DEPTH_TYPE:
   case GL_INTERNALFORMAT_STENCIL_TYPE: {
      GLint baseformat;
      mesa_format texformat;

      if (target != GL_RENDERBUFFER) {
         if (!_mesa_legal_get_tex_level_parameter_target(ctx, target, true))
            goto end;

         baseformat = _mesa_base_tex_format(ctx, internalformat);
      } else {
         baseformat = _mesa_base_fbo_format(ctx, internalformat);
      }

      /* Let the driver choose the texture format.
       *
       * Disclaimer: I am considering that drivers use for renderbuffers the
       * same format-choice logic as for textures.
       */
      texformat = ctx->Driver.ChooseTextureFormat(ctx, target, internalformat,
                                                  GL_NONE /*format */, GL_NONE /* type */);

      if (texformat == MESA_FORMAT_NONE || baseformat <= 0)
         goto end;

      /* Implementation based on what Mesa does for glGetTexLevelParameteriv
       * and glGetRenderbufferParameteriv functions.
       */
      if (pname == GL_INTERNALFORMAT_SHARED_SIZE) {
         if (_mesa_has_EXT_texture_shared_exponent(ctx) &&
             target != GL_TEXTURE_BUFFER &&
             target != GL_RENDERBUFFER &&
             texformat == MESA_FORMAT_R9G9B9E5_FLOAT) {
            buffer[0] = 5;
         }
         goto end;
      }

      if (!_mesa_base_format_has_channel(baseformat, pname))
         goto end;

      switch (pname) {
      case GL_INTERNALFORMAT_DEPTH_SIZE:
         if (ctx->API != API_OPENGL_CORE &&
             !_mesa_has_ARB_depth_texture(ctx) &&
             target != GL_RENDERBUFFER &&
             target != GL_TEXTURE_BUFFER)
            goto end;
         /* fallthrough */
      case GL_INTERNALFORMAT_RED_SIZE:
      case GL_INTERNALFORMAT_GREEN_SIZE:
      case GL_INTERNALFORMAT_BLUE_SIZE:
      case GL_INTERNALFORMAT_ALPHA_SIZE:
      case GL_INTERNALFORMAT_STENCIL_SIZE:
         buffer[0] = _mesa_get_format_bits(texformat, pname);
         break;

      case GL_INTERNALFORMAT_DEPTH_TYPE:
         if (!_mesa_has_ARB_texture_float(ctx))
            goto end;
         /* fallthrough */
      case GL_INTERNALFORMAT_RED_TYPE:
      case GL_INTERNALFORMAT_GREEN_TYPE:
      case GL_INTERNALFORMAT_BLUE_TYPE:
      case GL_INTERNALFORMAT_ALPHA_TYPE:
      case GL_INTERNALFORMAT_STENCIL_TYPE:
         buffer[0] = _mesa_get_format_datatype(texformat);
         break;

      default:
         break;
      }
      break;
   }

   /* For WIDTH/HEIGHT/DEPTH/LAYERS there is no reason to think that the
    * returned values should be different to the values returned by
    * GetInteger with MAX_TEXTURE_SIZE, MAX_3D_TEXTURE_SIZE, etc.*/
   case GL_MAX_WIDTH:
   case GL_MAX_HEIGHT:
   case GL_MAX_DEPTH: {
      GLenum get_pname;
      GLint dimensions;
      GLint min_dimensions;

      /* From query2:MAX_HEIGHT spec (as example):
       *
       * "If the resource does not have at least two dimensions, or if the
       * resource is unsupported, zero is returned."
       */
      dimensions = _get_target_dimensions(target);
      min_dimensions = _get_min_dimensions(pname);
      if (dimensions < min_dimensions)
         goto end;

      get_pname = _equivalent_size_pname(target, pname);
      if (get_pname == 0)
         goto end;

      _mesa_GetIntegerv(get_pname, buffer);
      break;
   }

   case GL_MAX_LAYERS:
      if (!_mesa_has_EXT_texture_array(ctx))
         goto end;

      if (!_mesa_is_array_texture(target))
         goto end;

      _mesa_GetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, buffer);
      break;

   case GL_MAX_COMBINED_DIMENSIONS: {
      GLint64 combined_value = 1;
      GLenum max_dimensions_pnames[] = {
         GL_MAX_WIDTH, GL_MAX_HEIGHT, GL_MAX_DEPTH, GL_SAMPLES
      };
      unsigned i;
      GLint current_value;

      /* Combining the dimensions. Note that for array targets, this would
       * automatically include the value of MAX_LAYERS, as that value is
       * returned as MAX_HEIGHT or MAX_DEPTH */
      for (i = 0; i < 4; i++) {
         if (max_dimensions_pnames[i] == GL_SAMPLES &&
             !_is_multisample_target(target))
            continue;

         /* BUGFIX: the out-pointer argument had been corrupted to the
          * mis-encoded token "¤t_value"; restore "&current_value". */
         _mesa_GetInternalformativ(target, internalformat,
                                   max_dimensions_pnames[i],
                                   1, &current_value);

         if (current_value != 0)
            combined_value *= current_value;
      }

      if (_mesa_is_cube_map_texture(target))
         combined_value *= 6;

      /* We pack the 64-bit value on two 32-bit values. Calling the 32-bit
       * query, this would work as far as the value can be hold on a 32-bit
       * signed integer. For the 64-bit query, the wrapper around the 32-bit
       * query will unpack the value */
      memcpy(buffer, &combined_value, sizeof(GLint64));
      break;
   }

   case GL_COLOR_COMPONENTS:
      /* The ARB_internalformat_query2 spec says:
       *
       *     "- COLOR_COMPONENTS: If the internal format contains any color
       *     components (R, G, B, or A), TRUE is returned in <params>.
       *     If the internal format is unsupported or contains no color
       *     components, FALSE is returned."
       */
      if (_mesa_is_color_format(internalformat))
         buffer[0] = GL_TRUE;
      break;

   case GL_DEPTH_COMPONENTS:
      /* The ARB_internalformat_query2 spec says:
       *
       *     "- DEPTH_COMPONENTS: If the internal format contains a depth
       *     component (D), TRUE is returned in <params>. If the internal format
       *     is unsupported or contains no depth component, FALSE is returned."
       */
      if (_mesa_is_depth_format(internalformat) ||
          _mesa_is_depthstencil_format(internalformat))
         buffer[0] = GL_TRUE;
      break;

   case GL_STENCIL_COMPONENTS:
      /* The ARB_internalformat_query2 spec says:
       *
       *     "- STENCIL_COMPONENTS: If the internal format contains a stencil
       *     component (S), TRUE is returned in <params>. If the internal format
       *     is unsupported or contains no stencil component, FALSE is returned.
       */
      if (_mesa_is_stencil_format(internalformat) ||
          _mesa_is_depthstencil_format(internalformat))
         buffer[0] = GL_TRUE;
      break;

   case GL_COLOR_RENDERABLE:
   case GL_DEPTH_RENDERABLE:
   case GL_STENCIL_RENDERABLE:
      if (!_is_renderable(ctx, internalformat))
         goto end;

      if (pname == GL_COLOR_RENDERABLE) {
         if (!_mesa_is_color_format(internalformat))
            goto end;
      } else {
         GLenum baseFormat = _mesa_base_fbo_format(ctx, internalformat);
         if (baseFormat != GL_DEPTH_STENCIL &&
             ((pname == GL_DEPTH_RENDERABLE && baseFormat != GL_DEPTH_COMPONENT) ||
              (pname == GL_STENCIL_RENDERABLE && baseFormat != GL_STENCIL_INDEX)))
            goto end;
      }

      buffer[0] = GL_TRUE;
      break;

   case GL_FRAMEBUFFER_RENDERABLE_LAYERED:
      if (!_mesa_has_EXT_texture_array(ctx) ||
          _legal_target_for_framebuffer_texture_layer(ctx, target))
         goto end;
      /* fallthrough */
   case GL_FRAMEBUFFER_RENDERABLE:
   case GL_FRAMEBUFFER_BLEND:
      if (!_mesa_has_ARB_framebuffer_object(ctx))
         goto end;

      if (target == GL_TEXTURE_BUFFER ||
          !_is_renderable(ctx, internalformat))
         goto end;

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_READ_PIXELS:
   case GL_READ_PIXELS_FORMAT:
   case GL_READ_PIXELS_TYPE:
      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_TEXTURE_IMAGE_FORMAT:
   case GL_GET_TEXTURE_IMAGE_FORMAT:
   case GL_TEXTURE_IMAGE_TYPE:
   case GL_GET_TEXTURE_IMAGE_TYPE:
      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_MIPMAP:
   case GL_MANUAL_GENERATE_MIPMAP:
   case GL_AUTO_GENERATE_MIPMAP:
      if (!_mesa_is_valid_generate_texture_mipmap_target(ctx, target) ||
          !_mesa_is_valid_generate_texture_mipmap_internalformat(ctx,
                                                                 internalformat)) {
         goto end;
      }

      if (pname == GL_MIPMAP) {
         buffer[0] = GL_TRUE;
         goto end;
      } else if (pname == GL_MANUAL_GENERATE_MIPMAP) {
         if (!_mesa_has_ARB_framebuffer_object(ctx))
            goto end;
      } else {
         /* From ARB_internalformat_query2:
          *    "Dependencies on OpenGL 3.2 (Core Profile)
          *     In core profiles for OpenGL 3.2 and later versions, queries
          *     for the AUTO_GENERATE_MIPMAP <pname> return the appropriate
          *     unsupported response."
          */
         if (_mesa_is_desktop_gl(ctx) && ctx->Version >= 32)
            goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_COLOR_ENCODING:
      if (!_mesa_is_color_format(internalformat))
         goto end;

      if (_mesa_is_srgb_format(internalformat))
         buffer[0] = GL_SRGB;
      else
         buffer[0] = GL_LINEAR;
      break;

   case GL_SRGB_READ:
      if (!_mesa_has_EXT_texture_sRGB(ctx) ||
          !_mesa_is_srgb_format(internalformat)) {
         goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_SRGB_WRITE:
      if (!_mesa_has_EXT_framebuffer_sRGB(ctx) ||
          !_mesa_is_color_format(internalformat)) {
         goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_SRGB_DECODE_ARB:
      /* Presence of EXT_texture_sRGB_decode was already verified */
      if (!_mesa_has_EXT_texture_sRGB(ctx) ||
          target == GL_RENDERBUFFER ||
          !_mesa_is_srgb_format(internalformat)) {
         goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_FILTER:
      /* If it doesn't allow to set sampler parameters then it would not allow
       * to set a filter different to GL_NEAREST. In practice, this method
       * only filters out MULTISAMPLE/MULTISAMPLE_ARRAY */
      if (!_mesa_target_allows_setting_sampler_parameters(target))
         goto end;

      if (_mesa_is_enum_format_integer(internalformat))
         goto end;

      if (target == GL_TEXTURE_BUFFER)
         goto end;

      /* At this point we know that multi-texel filtering is supported. We
       * need to call the driver to know if it is CAVEAT_SUPPORT or
       * FULL_SUPPORT.
       */
      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_VERTEX_TEXTURE:
   case GL_TESS_CONTROL_TEXTURE:
   case GL_TESS_EVALUATION_TEXTURE:
   case GL_GEOMETRY_TEXTURE:
   case GL_FRAGMENT_TEXTURE:
   case GL_COMPUTE_TEXTURE:
      if (target == GL_RENDERBUFFER)
         goto end;

      if ((pname == GL_TESS_CONTROL_TEXTURE ||
           pname == GL_TESS_EVALUATION_TEXTURE) &&
          !_mesa_has_tessellation(ctx))
         goto end;

      if (pname == GL_GEOMETRY_TEXTURE && !_mesa_has_geometry_shaders(ctx))
         goto end;

      if (pname == GL_COMPUTE_TEXTURE && !_mesa_has_compute_shaders(ctx))
         goto end;

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_TEXTURE_GATHER:
   case GL_TEXTURE_GATHER_SHADOW:
      if (!_mesa_has_ARB_texture_gather(ctx))
         goto end;

      /* fallthrough */
   case GL_TEXTURE_SHADOW:
      /* Only depth or depth-stencil image formats make sense in shadow
         samplers */
      if (pname != GL_TEXTURE_GATHER &&
          !_mesa_is_depth_format(internalformat) &&
          !_mesa_is_depthstencil_format(internalformat))
         goto end;

      /* Validate the target for shadow and gather operations */
      switch (target) {
      case GL_TEXTURE_2D:
      case GL_TEXTURE_2D_ARRAY:
      case GL_TEXTURE_CUBE_MAP:
      case GL_TEXTURE_CUBE_MAP_ARRAY:
      case GL_TEXTURE_RECTANGLE:
         break;

      case GL_TEXTURE_1D:
      case GL_TEXTURE_1D_ARRAY:
         /* 1D and 1DArray textures are not admitted in gather operations */
         if (pname != GL_TEXTURE_SHADOW)
            goto end;
         break;

      default:
         goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_SHADER_IMAGE_LOAD:
   case GL_SHADER_IMAGE_STORE:
      if (!_mesa_has_ARB_shader_image_load_store(ctx))
         goto end;

      /* We call to _mesa_is_shader_image_format_supported
       * using "internalformat" as parameter, because the
       * the ARB_internalformat_query2 spec says:
       * "In this case the <internalformat> is the value of the <format>
       * parameter that is passed to BindImageTexture."
       */
      if (target == GL_RENDERBUFFER ||
          !_mesa_is_shader_image_format_supported(ctx, internalformat))
         goto end;

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_SHADER_IMAGE_ATOMIC:
      if (!_mesa_has_ARB_shader_image_load_store(ctx))
         goto end;

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_IMAGE_TEXEL_SIZE: {
      mesa_format image_format;

      if (!_mesa_has_ARB_shader_image_load_store(ctx) ||
          target == GL_RENDERBUFFER)
         goto end;

      image_format = _mesa_get_shader_image_format(internalformat);
      if (image_format == MESA_FORMAT_NONE)
         goto end;

      /* We return bits */
      buffer[0] = (_mesa_get_format_bytes(image_format) * 8);
      break;
   }

   case GL_IMAGE_COMPATIBILITY_CLASS:
      if (!_mesa_has_ARB_shader_image_load_store(ctx) ||
          target == GL_RENDERBUFFER)
         goto end;

      buffer[0] = _mesa_get_image_format_class(internalformat);
      break;

   case GL_IMAGE_PIXEL_FORMAT: {
      GLint base_format;

      if (!_mesa_has_ARB_shader_image_load_store(ctx) ||
          target == GL_RENDERBUFFER ||
          !_mesa_is_shader_image_format_supported(ctx, internalformat))
         goto end;

      base_format = _mesa_base_tex_format(ctx, internalformat);
      if (base_format == -1)
         goto end;

      if (_mesa_is_enum_format_integer(internalformat))
         buffer[0] = _mesa_base_format_to_integer_format(base_format);
      else
         buffer[0] = base_format;
      break;
   }

   case GL_IMAGE_PIXEL_TYPE: {
      mesa_format image_format;
      GLenum datatype;
      GLuint comps;

      if (!_mesa_has_ARB_shader_image_load_store(ctx) ||
          target == GL_RENDERBUFFER)
         goto end;

      image_format = _mesa_get_shader_image_format(internalformat);
      if (image_format == MESA_FORMAT_NONE)
         goto end;

      _mesa_uncompressed_format_to_type_and_comps(image_format, &datatype,
                                                  &comps);
      if (!datatype)
         goto end;

      buffer[0] = datatype;
      break;
   }

   case GL_IMAGE_FORMAT_COMPATIBILITY_TYPE: {
      if (!_mesa_has_ARB_shader_image_load_store(ctx))
         goto end;

      if (!_mesa_legal_get_tex_level_parameter_target(ctx, target, true))
         goto end;

      /* From spec: "Equivalent to calling GetTexParameter with <value> set
       * to IMAGE_FORMAT_COMPATIBILITY_TYPE."
       *
       * GetTexParameter just returns
       * tex_obj->ImageFormatCompatibilityType. We create a fake tex_obj
       * just with the purpose of getting the value.
       */
      struct gl_texture_object *tex_obj = _mesa_new_texture_object(ctx, 0, target);
      /* ROBUSTNESS: guard against allocation failure before dereferencing. */
      if (!tex_obj)
         goto end;
      buffer[0] = tex_obj->ImageFormatCompatibilityType;
      _mesa_delete_texture_object(ctx, tex_obj);
      break;
   }

   case GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST:
   case GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST:
   case GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE:
   case GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE:
      if (target == GL_RENDERBUFFER)
         goto end;

      if (!_mesa_is_depthstencil_format(internalformat)) {
         if (((pname == GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST ||
               pname == GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE) &&
              !_mesa_is_depth_format(internalformat)) ||
             ((pname == GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST ||
               pname == GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE) &&
              !_mesa_is_stencil_format(internalformat)))
            goto end;
      }

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_TEXTURE_COMPRESSED:
      buffer[0] = _mesa_is_compressed_format(ctx, internalformat);
      break;

   case GL_TEXTURE_COMPRESSED_BLOCK_WIDTH:
   case GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT:
   case GL_TEXTURE_COMPRESSED_BLOCK_SIZE: {
      mesa_format mesaformat;
      GLint block_size;

      mesaformat = _mesa_glenum_to_compressed_format(internalformat);
      if (mesaformat == MESA_FORMAT_NONE)
         goto end;

      block_size = _mesa_get_format_bytes(mesaformat);
      assert(block_size > 0);

      if (pname == GL_TEXTURE_COMPRESSED_BLOCK_SIZE) {
         buffer[0] = block_size;
      } else {
         GLuint bwidth, bheight;

         /* Returns the width and height in pixels. We return bytes */
         _mesa_get_format_block_size(mesaformat, &bwidth, &bheight);
         assert(bwidth > 0 && bheight > 0);

         if (pname == GL_TEXTURE_COMPRESSED_BLOCK_WIDTH)
            buffer[0] = block_size / bheight;
         else
            buffer[0] = block_size / bwidth;
      }
      break;
   }

   case GL_CLEAR_BUFFER:
      if (target != GL_TEXTURE_BUFFER)
         goto end;

      ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                      buffer);
      break;

   case GL_TEXTURE_VIEW:
   case GL_VIEW_COMPATIBILITY_CLASS:
      if (!_mesa_has_ARB_texture_view(ctx) ||
          target == GL_TEXTURE_BUFFER ||
          target == GL_RENDERBUFFER)
         goto end;

      if (pname == GL_TEXTURE_VIEW) {
         ctx->Driver.QueryInternalFormat(ctx, target, internalformat, pname,
                                         buffer);
      } else {
         GLenum view_class = _mesa_texture_view_lookup_view_class(ctx,
                                                                  internalformat);
         if (view_class == GL_FALSE)
            goto end;

         buffer[0] = view_class;
      }
      break;

   default:
      unreachable("bad param");
   }

 end:
   if (bufSize != 0 && params == NULL) {
      /* Emit a warning to aid application debugging, but go ahead and do the
       * memcpy (and probably crash) anyway.
       */
      _mesa_warning(ctx,
                    "glGetInternalformativ(bufSize = %d, but params = NULL)",
                    bufSize);
   }

   /* Copy the data from the temporary buffer to the buffer supplied by the
    * application.  Clamp the size of the copy to the size supplied by the
    * application.
    */
   memcpy(params, buffer, MIN2(bufSize, 16) * sizeof(GLint));

   return;
}
/**
 * glCopyImageSubData entry point (ARB_copy_image) — variant that permits a
 * partially-sized final compressed block at the image edge, per the
 * GL 4.5 core spec sections 18.3.2 and 8.7.
 */
void GLAPIENTRY
_mesa_CopyImageSubData(GLuint srcName, GLenum srcTarget, GLint srcLevel,
                       GLint srcX, GLint srcY, GLint srcZ,
                       GLuint dstName, GLenum dstTarget, GLint dstLevel,
                       GLint dstX, GLint dstY, GLint dstZ,
                       GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth)
{
   GET_CURRENT_CONTEXT(ctx);
   GLuint tmpTexNames[2] = { 0, 0 };
   struct gl_texture_object *srcTexObj, *dstTexObj;
   struct gl_texture_image *srcTexImage, *dstTexImage;
   GLuint src_w, src_h, dst_w, dst_h;
   GLuint src_bw, src_bh, dst_bw, dst_bh;
   int i;

   if (MESA_VERBOSE & VERBOSE_API)
      /* BUGFIX: the last argument used to be srcWidth, so the trace printed
       * the width where the depth belongs.  Pass srcDepth. */
      _mesa_debug(ctx, "glCopyImageSubData(%u, %s, %d, %d, %d, %d, "
                       "%u, %s, %d, %d, %d, %d, "
                       "%d, %d, %d)\n",
                  srcName, _mesa_enum_to_string(srcTarget), srcLevel,
                  srcX, srcY, srcZ,
                  dstName, _mesa_enum_to_string(dstTarget), dstLevel,
                  dstX, dstY, dstZ,
                  srcWidth, srcHeight, srcDepth);

   if (!ctx->Extensions.ARB_copy_image) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(extension not available)");
      return;
   }

   if (!prepare_target(ctx, srcName, &srcTarget, srcLevel,
                       &srcTexObj, &srcTexImage, &tmpTexNames[0],
                       &src_w, &src_h, "src"))
      goto cleanup;

   if (!prepare_target(ctx, dstName, &dstTarget, dstLevel,
                       &dstTexObj, &dstTexImage, &tmpTexNames[1],
                       &dst_w, &dst_h, "dst"))
      goto cleanup;

   _mesa_get_format_block_size(srcTexImage->TexFormat, &src_bw, &src_bh);

   /* Section 18.3.2 (Copying Between Images) of the OpenGL 4.5 Core Profile
    * spec says:
    *
    *    An INVALID_VALUE error is generated if the dimensions of either
    *    subregion exceeds the boundaries of the corresponding image object,
    *    or if the image format is compressed and the dimensions of the
    *    subregion fail to meet the alignment constraints of the format.
    *
    * and Section 8.7 (Compressed Texture Images) says:
    *
    *    An INVALID_OPERATION error is generated if any of the following
    *    conditions occurs:
    *
    *      * width is not a multiple of four, and width + xoffset is not
    *        equal to the value of TEXTURE_WIDTH.
    *      * height is not a multiple of four, and height + yoffset is not
    *        equal to the value of TEXTURE_HEIGHT.
    *
    * so we take that to mean that you can copy the "last" block of a
    * compressed texture image even if it's smaller than the minimum block
    * dimensions.
    */
   if ((srcX % src_bw != 0) || (srcY % src_bh != 0) ||
       (srcWidth % src_bw != 0 && (srcX + srcWidth) != src_w) ||
       (srcHeight % src_bh != 0 && (srcY + srcHeight) != src_h)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned src rectangle)");
      goto cleanup;
   }

   _mesa_get_format_block_size(dstTexImage->TexFormat, &dst_bw, &dst_bh);
   if ((dstX % dst_bw != 0) || (dstY % dst_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned dst rectangle)");
      goto cleanup;
   }

   if (!check_region_bounds(ctx, srcTexImage, srcX, srcY, srcZ,
                            srcWidth, srcHeight, srcDepth, "src"))
      goto cleanup;

   /* Destination extent is the source extent rescaled by the ratio of the
    * two block sizes (spec: dimensions are in texels even for compressed
    * formats). */
   if (!check_region_bounds(ctx, dstTexImage, dstX, dstY, dstZ,
                            (srcWidth / src_bw) * dst_bw,
                            (srcHeight / src_bh) * dst_bh,
                            srcDepth, "dst"))
      goto cleanup;

   if (!copy_format_compatible(ctx, srcTexImage->InternalFormat,
                               dstTexImage->InternalFormat)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(internalFormat mismatch)");
      goto cleanup;
   }

   for (i = 0; i < srcDepth; ++i) {
      int srcNewZ, dstNewZ;

      /* Cube maps store each face as a separate image; select the face via
       * Z and copy within face slice 0. */
      if (srcTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         srcTexImage = srcTexObj->Image[i + srcZ][srcLevel];
         srcNewZ = 0;
      } else {
         srcNewZ = srcZ + i;
      }

      if (dstTexObj->Target == GL_TEXTURE_CUBE_MAP) {
         dstTexImage = dstTexObj->Image[i + dstZ][dstLevel];
         dstNewZ = 0;
      } else {
         dstNewZ = dstZ + i;
      }

      ctx->Driver.CopyImageSubData(ctx, srcTexImage, srcX, srcY, srcNewZ,
                                   dstTexImage, dstX, dstY, dstNewZ,
                                   srcWidth, srcHeight);
   }

cleanup:
   _mesa_DeleteTextures(2, tmpTexNames);
}
/**
 * glCopyImageSubData() entry point (texture/renderbuffer variant).
 *
 * Resolves each name to either a gl_texture_image or a gl_renderbuffer via
 * prepare_target(), validates alignment, bounds, format compatibility and
 * sample counts per GL_ARB_copy_image, then dispatches one driver copy per
 * 2D slice/face/layer.  All dimensions arrive in texels of the *source*
 * image; the destination region is rescaled by the block-size ratio below.
 */
void GLAPIENTRY
_mesa_CopyImageSubData(GLuint srcName, GLenum srcTarget, GLint srcLevel,
                       GLint srcX, GLint srcY, GLint srcZ,
                       GLuint dstName, GLenum dstTarget, GLint dstLevel,
                       GLint dstX, GLint dstY, GLint dstZ,
                       GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth)
{
   GET_CURRENT_CONTEXT(ctx);
   /* Exactly one of texImage/renderbuffer is non-NULL per side, as filled
    * in by prepare_target().
    */
   struct gl_texture_image *srcTexImage, *dstTexImage;
   struct gl_renderbuffer *srcRenderbuffer, *dstRenderbuffer;
   mesa_format srcFormat, dstFormat;
   GLenum srcIntFormat, dstIntFormat;
   GLuint src_w, src_h, dst_w, dst_h;
   GLuint src_bw, src_bh, dst_bw, dst_bh;     /* compressed block dims */
   GLuint src_num_samples, dst_num_samples;
   int dstWidth, dstHeight, dstDepth;
   int i;

   if (MESA_VERBOSE & VERBOSE_API)
      _mesa_debug(ctx, "glCopyImageSubData(%u, %s, %d, %d, %d, %d, "
                       "%u, %s, %d, %d, %d, %d, "
                       "%d, %d, %d)\n",
                  srcName, _mesa_enum_to_string(srcTarget), srcLevel,
                  srcX, srcY, srcZ,
                  dstName, _mesa_enum_to_string(dstTarget), dstLevel,
                  dstX, dstY, dstZ,
                  srcWidth, srcHeight, srcDepth);

   if (!ctx->Extensions.ARB_copy_image) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(extension not available)");
      return;
   }

   /* prepare_target() reports its own GL errors on failure, so we just
    * return here.  Note the destination is validated against srcDepth:
    * the copied depth is the same for both images.
    */
   if (!prepare_target(ctx, srcName, srcTarget, srcLevel, srcZ, srcDepth,
                       &srcTexImage, &srcRenderbuffer, &srcFormat,
                       &srcIntFormat, &src_w, &src_h, &src_num_samples,
                       "src"))
      return;

   if (!prepare_target(ctx, dstName, dstTarget, dstLevel, dstZ, srcDepth,
                       &dstTexImage, &dstRenderbuffer, &dstFormat,
                       &dstIntFormat, &dst_w, &dst_h, &dst_num_samples,
                       "dst"))
      return;

   _mesa_get_format_block_size(srcFormat, &src_bw, &src_bh);

   /* Section 18.3.2 (Copying Between Images) of the OpenGL 4.5 Core Profile
    * spec says:
    *
    *    An INVALID_VALUE error is generated if the dimensions of either
    *    subregion exceeds the boundaries of the corresponding image object,
    *    or if the image format is compressed and the dimensions of the
    *    subregion fail to meet the alignment constraints of the format.
    *
    * and Section 8.7 (Compressed Texture Images) says:
    *
    *    An INVALID_OPERATION error is generated if any of the following
    *    conditions occurs:
    *
    *      * width is not a multiple of four, and width + xoffset is not
    *        equal to the value of TEXTURE_WIDTH.
    *      * height is not a multiple of four, and height + yoffset is not
    *        equal to the value of TEXTURE_HEIGHT.
    *
    * so we take that to mean that you can copy the "last" block of a
    * compressed texture image even if it's smaller than the minimum block
    * dimensions.
    */
   if ((srcX % src_bw != 0) || (srcY % src_bh != 0) ||
       (srcWidth % src_bw != 0 && (srcX + srcWidth) != src_w) ||
       (srcHeight % src_bh != 0 && (srcY + srcHeight) != src_h)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned src rectangle)");
      return;
   }

   _mesa_get_format_block_size(dstFormat, &dst_bw, &dst_bh);
   if ((dstX % dst_bw != 0) || (dstY % dst_bh != 0)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glCopyImageSubData(unaligned dst rectangle)");
      return;
   }

   /* From the GL_ARB_copy_image spec:
    *
    *    "The dimensions are always specified in texels, even for compressed
    *    texture formats. But it should be noted that if only one of the
    *    source and destination textures is compressed then the number of
    *    texels touched in the compressed image will be a factor of the
    *    block size larger than in the uncompressed image."
    *
    * So, if copying from compressed to uncompressed, the dest region is
    * shrunk by the src block size factor.  If copying from uncompressed
    * to compressed, the dest region is grown by the dest block size factor.
    * Note that we're passed the _source_ width, height, depth and those
    * dimensions are never changed.
    */
   dstWidth = srcWidth * dst_bw / src_bw;
   dstHeight = srcHeight * dst_bh / src_bh;
   dstDepth = srcDepth;

   if (!check_region_bounds(ctx, srcTarget, srcTexImage, srcRenderbuffer,
                            srcX, srcY, srcZ, srcWidth, srcHeight, srcDepth,
                            "src"))
      return;

   if (!check_region_bounds(ctx, dstTarget, dstTexImage, dstRenderbuffer,
                            dstX, dstY, dstZ, dstWidth, dstHeight, dstDepth,
                            "dst"))
      return;

   /* Section 18.3.2 (Copying Between Images) of the OpenGL 4.5 Core Profile
    * spec says:
    *
    *    An INVALID_OPERATION error is generated if either object is a texture
    *    and the texture is not complete, if the source and destination internal
    *    formats are not compatible, or if the number of samples do not match.
    */
   if (!copy_format_compatible(ctx, srcIntFormat, dstIntFormat)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(internalFormat mismatch)");
      return;
   }

   if (src_num_samples != dst_num_samples) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glCopyImageSubData(number of samples mismatch)");
      return;
   }

   /* loop over 2D slices/faces/layers */
   for (i = 0; i < srcDepth; ++i) {
      int newSrcZ = srcZ + i;
      int newDstZ = dstZ + i;

      if (srcTexImage &&
          srcTexImage->TexObject->Target == GL_TEXTURE_CUBE_MAP) {
         /* need to update srcTexImage pointer for the cube face */
         assert(srcZ + i < MAX_FACES);
         srcTexImage = srcTexImage->TexObject->Image[srcZ + i][srcLevel];
         assert(srcTexImage);
         /* Within a single face, the slice index is always 0. */
         newSrcZ = 0;
      }

      if (dstTexImage &&
          dstTexImage->TexObject->Target == GL_TEXTURE_CUBE_MAP) {
         /* need to update dstTexImage pointer for the cube face */
         assert(dstZ + i < MAX_FACES);
         dstTexImage = dstTexImage->TexObject->Image[dstZ + i][dstLevel];
         assert(dstTexImage);
         newDstZ = 0;
      }

      ctx->Driver.CopyImageSubData(ctx, srcTexImage, srcRenderbuffer,
                                   srcX, srcY, newSrcZ,
                                   dstTexImage, dstRenderbuffer,
                                   dstX, dstY, newDstZ,
                                   srcWidth, srcHeight);
   }
}
static void copy_image_with_memcpy(struct brw_context *brw, struct intel_mipmap_tree *src_mt, int src_level, int src_x, int src_y, int src_z, struct intel_mipmap_tree *dst_mt, int dst_level, int dst_x, int dst_y, int dst_z, int src_width, int src_height) { bool same_slice; void *mapped, *src_mapped, *dst_mapped; ptrdiff_t src_stride, dst_stride, cpp; int map_x1, map_y1, map_x2, map_y2; GLuint src_bw, src_bh; cpp = _mesa_get_format_bytes(src_mt->format); _mesa_get_format_block_size(src_mt->format, &src_bw, &src_bh); assert(src_width % src_bw == 0); assert(src_height % src_bh == 0); assert(src_x % src_bw == 0); assert(src_y % src_bh == 0); /* If we are on the same miptree, same level, and same slice, then * intel_miptree_map won't let us map it twice. We have to do things a * bit differently. In particular, we do a single map large enough for * both portions and in read-write mode. */ same_slice = src_mt == dst_mt && src_level == dst_level && src_z == dst_z; if (same_slice) { assert(dst_x % src_bw == 0); assert(dst_y % src_bh == 0); map_x1 = MIN2(src_x, dst_x); map_y1 = MIN2(src_y, dst_y); map_x2 = MAX2(src_x, dst_x) + src_width; map_y2 = MAX2(src_y, dst_y) + src_height; intel_miptree_map(brw, src_mt, src_level, src_z, map_x1, map_y1, map_x2 - map_x1, map_y2 - map_y1, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT, &mapped, &src_stride); dst_stride = src_stride; /* Set the offsets here so we don't have to think about while looping */ src_mapped = mapped + ((src_y - map_y1) / src_bh) * src_stride + ((src_x - map_x1) / src_bw) * cpp; dst_mapped = mapped + ((dst_y - map_y1) / src_bh) * dst_stride + ((dst_x - map_x1) / src_bw) * cpp; } else { intel_miptree_map(brw, src_mt, src_level, src_z, src_x, src_y, src_width, src_height, GL_MAP_READ_BIT, &src_mapped, &src_stride); intel_miptree_map(brw, dst_mt, dst_level, dst_z, dst_x, dst_y, src_width, src_height, GL_MAP_WRITE_BIT, &dst_mapped, &dst_stride); } src_width /= (int)src_bw; src_height /= (int)src_bh; for (int i = 0; i < 
src_height; ++i) { memcpy(dst_mapped, src_mapped, src_width * cpp); src_mapped += src_stride; dst_mapped += dst_stride; } if (same_slice) { intel_miptree_unmap(brw, src_mt, src_level, src_z); } else { intel_miptree_unmap(brw, dst_mt, dst_level, dst_z); intel_miptree_unmap(brw, src_mt, src_level, src_z); } }