/** * All glTexSubImage calls go through this function. */ static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei imageSize, GLenum format, GLenum type, const GLvoid * pixels, const struct gl_pixelstore_attrib *packing, struct gl_texture_object *texObj, struct gl_texture_image *texImage, int compressed) { radeonContextPtr rmesa = RADEON_CONTEXT(ctx); radeonTexObj* t = radeon_tex_obj(texObj); radeon_texture_image* image = get_radeon_texture_image(texImage); radeon_print(RADEON_TEXTURE, RADEON_NORMAL, "%s %dd: texObj %p, texImage %p, face %d, level %d\n", __func__, dims, texObj, texImage, _mesa_tex_target_to_face(target), level); { struct radeon_bo *bo; bo = !image->mt ? image->bo : image->mt->bo; if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) { radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, "%s Calling texsubimage for texture that is " "queued for GPU processing.\n", __func__); radeon_firevertices(rmesa); } } t->validated = GL_FALSE; if (compressed) { pixels = _mesa_validate_pbo_compressed_teximage( ctx, imageSize, pixels, packing, "glCompressedTexSubImage"); } else { pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, depth, format, type, pixels, packing, "glTexSubImage"); } if (pixels) { radeon_store_teximage(ctx, dims, xoffset, yoffset, zoffset, width, height, depth, imageSize, format, type, pixels, packing, texObj, texImage, compressed); } _mesa_unmap_teximage_pbo(ctx, packing); }
/**
 * All glTexImage calls go through this function.
 *
 * Flushes pending GPU work referencing the texture, frees any previous
 * image storage, assigns (or allocates fallback memory for) the image,
 * then uploads the pixel data from user memory or a PBO.
 */
static void radeon_teximage(GLcontext *ctx, int dims, GLenum target, GLint level,
			    GLint internalFormat,
			    GLint width, GLint height, GLint depth,
			    GLsizei imageSize,
			    GLenum format, GLenum type, const GLvoid * pixels,
			    const struct gl_pixelstore_attrib *packing,
			    struct gl_texture_object *texObj,
			    struct gl_texture_image *texImage,
			    int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);

	/* If the command stream still references this texture's buffer
	 * object, flush it before the CPU touches the data. */
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;

	/* Pixel-transfer convolution may change the effective image size. */
	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims,
						   &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		/* Try to place the image in a miptree; otherwise fall back
		 * to plain system memory. */
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
							   texImage->Width,
							   texImage->Height,
							   texImage->Depth);
			/* NOTE(review): allocation result is not checked here;
			 * a NULL Data pointer would be passed on to the store
			 * path below — confirm downstream handling. */
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s %dd: texObj %p, texImage %p, "
				" no miptree assigned, using local memory %p\n",
				__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
static bool intel_blit_texsubimage(struct gl_context * ctx, struct gl_texture_image *texImage, GLint xoffset, GLint yoffset, GLint width, GLint height, GLenum format, GLenum type, const void *pixels, const struct gl_pixelstore_attrib *packing) { struct intel_context *intel = intel_context(ctx); struct intel_texture_image *intelImage = intel_texture_image(texImage); /* Try to do a blit upload of the subimage if the texture is * currently busy. */ if (!intelImage->mt) return false; /* The blitter can't handle Y tiling */ if (intelImage->mt->region->tiling == I915_TILING_Y) return false; if (texImage->TexObject->Target != GL_TEXTURE_2D) return false; if (!drm_intel_bo_busy(intelImage->mt->region->bo)) return false; DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n", __func__, _mesa_enum_to_string(texImage->TexObject->Target), texImage->Level, xoffset, yoffset, width, height); pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1, format, type, pixels, packing, "glTexSubImage"); if (!pixels) return false; struct intel_mipmap_tree *temp_mt = intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat, 0, 0, width, height, 1, false, INTEL_MIPTREE_TILING_NONE); if (!temp_mt) goto err; GLubyte *dst = intel_miptree_map_raw(intel, temp_mt); if (!dst) goto err; if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat, texImage->TexFormat, temp_mt->region->pitch, &dst, width, height, 1, format, type, pixels, packing)) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage"); } intel_miptree_unmap_raw(temp_mt); bool ret; ret = intel_miptree_blit(intel, temp_mt, 0, 0, 0, 0, false, intelImage->mt, texImage->Level, texImage->Face, xoffset, yoffset, false, width, height, COLOR_LOGICOP_COPY); assert(ret); intel_miptree_release(&temp_mt); _mesa_unmap_teximage_pbo(ctx, packing); return ret; err: _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage"); intel_miptree_release(&temp_mt); _mesa_unmap_teximage_pbo(ctx, packing); return false; }
/**
 * Driver back end for glTexImage1/2/3D (and the compressed variants).
 *
 * Chooses the hardware format, releases any previous storage, tries to
 * place the image in the object's mipmap tree (allocating one if needed),
 * attempts PBO zero-copy / blitter fast paths, and finally falls back to
 * mapping the destination and storing the pixels with software conversion.
 *
 * Note: intelCopyTexImage calls this with pixels == NULL purely to get the
 * storage set up (see the comment before the PBO-validate step below).
 */
static void intelTexImage(GLcontext * ctx,
                          GLint dims,
                          GLenum target, GLint level,
                          GLint internalFormat,
                          GLint width, GLint height, GLint depth,
                          GLint border,
                          GLenum format, GLenum type, const void *pixels,
                          const struct gl_pixelstore_attrib *unpack,
                          struct gl_texture_object *texObj,
                          struct gl_texture_image *texImage,
                          GLsizei imageSize, int compressed)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intelObj = intel_texture_object(texObj);
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   GLint postConvWidth = width;
   GLint postConvHeight = height;
   GLint texelBytes, sizeInBytes;
   GLuint dstRowStride;

   DBG("%s target %s level %d %dx%dx%d border %d\n",
       __FUNCTION__, _mesa_lookup_enum_by_nr(target),
       level, width, height, depth, border);

   intelFlush(ctx);

   intelImage->face = target_to_face(target);
   intelImage->level = level;

   /* Pixel-transfer convolution may change the effective image size. */
   if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
      _mesa_adjust_image_for_convolution(ctx, dims,
                                         &postConvWidth, &postConvHeight);
   }

   /* choose the texture format */
   texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
                                                  format, type);

   _mesa_set_fetch_functions(texImage, dims);

   if (texImage->TexFormat->TexelBytes == 0) {
      /* must be a compressed format */
      texelBytes = 0;
      texImage->IsCompressed = GL_TRUE;
      texImage->CompressedSize =
         ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
                                           texImage->Height, texImage->Depth,
                                           texImage->TexFormat->MesaFormat);
   } else {
      texelBytes = texImage->TexFormat->TexelBytes;

      /* Minimum pitch of 32 bytes */
      if (postConvWidth * texelBytes < 32) {
	 postConvWidth = 32 / texelBytes;
	 texImage->RowStride = postConvWidth;
      }

      assert(texImage->RowStride == postConvWidth);
   }

   /* Release the reference to a potentially orphaned buffer.
    * Release any old malloced memory.
    */
   if (intelImage->mt) {
      intel_miptree_release(intel, &intelImage->mt);
      assert(!texImage->Data);
   } else if (texImage->Data) {
      _mesa_align_free(texImage->Data);
   }

   /* If this is the only texture image in the tree, could call
    * bmBufferData with NULL data to free the old block and avoid
    * waiting on any outstanding fences.
    */
   if (intelObj->mt &&
       intelObj->mt->first_level == level &&
       intelObj->mt->last_level == level &&
       intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
       !intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                  intelImage->face, intelImage->level)) {

      DBG("release it\n");
      intel_miptree_release(intel, &intelObj->mt);
      assert(!intelObj->mt);
   }

   /* No tree yet: guess a layout from this image and allocate one. */
   if (!intelObj->mt) {
      guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
      if (!intelObj->mt) {
	 DBG("guess_and_alloc_mipmap_tree: failed\n");
      }
   }

   assert(!intelImage->mt);

   if (intelObj->mt &&
       intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                 intelImage->face, intelImage->level)) {

      intel_miptree_reference(&intelImage->mt, intelObj->mt);
      assert(intelImage->mt);
   }

   if (!intelImage->mt)
      DBG("XXX: Image did not fit into tree - storing in local memory!\n");

   /* PBO fastpaths:
    */
   if (dims <= 2 &&
       intelImage->mt &&
       intel_buffer_object(unpack->BufferObj) &&
       check_pbo_format(internalFormat, format,
                        type, intelImage->base.TexFormat)) {

      DBG("trying pbo upload\n");

      /* Attempt to texture directly from PBO data (zero copy upload).
       *
       * Currently disable as it can lead to worse as well as better
       * performance (in particular when intel_region_cow() is
       * required).
       */
      if (intelObj->mt == intelImage->mt &&
          intelObj->mt->first_level == level &&
          intelObj->mt->last_level == level) {

         if (try_pbo_zcopy(intel, intelImage, unpack,
                           internalFormat,
                           width, height, format, type, pixels)) {
            DBG("pbo zcopy upload succeeded\n");
            return;
         }
      }

      /* Otherwise, attempt to use the blitter for PBO image uploads.
       */
      if (try_pbo_upload(intel, intelImage, unpack,
                         internalFormat,
                         width, height, format, type, pixels)) {
         DBG("pbo upload succeeded\n");
         return;
      }

      DBG("pbo upload failed\n");
   }

   /* intelCopyTexImage calls this function with pixels == NULL, with
    * the expectation that the mipmap tree will be set up but nothing
    * more will be done. This is where those calls return:
    */
   if (compressed) {
      pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
                                                      unpack,
                                                      "glCompressedTexImage");
   } else {
      pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
                                           format, type,
                                           pixels, unpack, "glTexImage");
   }

   if (!pixels)
      return;

   /* Wait for the GPU to finish with the destination region before mapping. */
   if (intelImage->mt)
      intel_region_idle(intel->intelScreen, intelImage->mt->region);

   LOCK_HARDWARE(intel);

   if (intelImage->mt) {
      texImage->Data = intel_miptree_image_map(intel,
                                               intelImage->mt,
                                               intelImage->face,
                                               intelImage->level,
                                               &dstRowStride,
                                               intelImage->base.ImageOffsets);
   } else {
      /* Allocate regular memory and store the image there temporarily.   */
      if (texImage->IsCompressed) {
         sizeInBytes = texImage->CompressedSize;
         dstRowStride =
            _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
         assert(dims != 3);
      } else {
         dstRowStride = postConvWidth * texelBytes;
         sizeInBytes = depth * dstRowStride * postConvHeight;
      }

      /* NOTE(review): malloc result is not checked before use below —
       * confirm whether upstream relies on StoreImage handling NULL. */
      texImage->Data = malloc(sizeInBytes);
   }

   DBG("Upload image %dx%dx%d row_len %x "
       "pitch %x\n",
       width, height, depth, width * texelBytes, dstRowStride);

   /* Copy data. Would like to know when it's ok for us to eg. use
    * the blitter to copy. Or, use the hardware to do the format
    * conversion and copy:
    */
   if (compressed) {
      memcpy(texImage->Data, pixels, imageSize);
   } else if (!texImage->TexFormat->StoreImage(ctx, dims,
                                               texImage->_BaseFormat,
                                               texImage->TexFormat,
                                               texImage->Data,
                                               0, 0, 0, /* dstX/Y/Zoffset */
                                               dstRowStride,
                                               texImage->ImageOffsets,
                                               width, height, depth,
                                               format, type, pixels, unpack)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
   }

   _mesa_unmap_teximage_pbo(ctx, unpack);

   if (intelImage->mt) {
      intel_miptree_image_unmap(intel, intelImage->mt);
      texImage->Data = NULL;
   }

   UNLOCK_HARDWARE(intel);

#if 0
   /* GL_SGIS_generate_mipmap -- this can be accelerated now.
    */
   if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
      intel_generate_mipmap(ctx, target,
                            &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
                            texObj);
   }
#endif
}
/**
 * Helper function for storing 1D, 2D, 3D whole and subimages into texture
 * memory.
 * The source of the image data may be user memory or a PBO.  In the latter
 * case, we'll map the PBO, copy from it, then unmap it.
 *
 * Array and 3D targets are handled as a series of 2D (or 1D) slices:
 * the switch below rewrites height/yoffset (1D arrays) or depth/zoffset
 * (2D arrays) so each iteration of the slice loop stores one layer.
 */
static void
store_texsubimage(struct gl_context *ctx,
                  struct gl_texture_image *texImage,
                  GLint xoffset, GLint yoffset, GLint zoffset,
                  GLint width, GLint height, GLint depth,
                  GLenum format, GLenum type, const GLvoid *pixels,
                  const struct gl_pixelstore_attrib *packing,
                  const char *caller)
{
   const GLbitfield mapMode = get_read_write_mode(format, texImage->TexFormat);
   const GLenum target = texImage->TexObject->Target;
   GLboolean success = GL_FALSE;
   GLuint dims, slice, numSlices = 1, sliceOffset = 0;
   GLint srcImageStride = 0;
   const GLubyte *src;

   assert(xoffset + width <= texImage->Width);
   assert(yoffset + height <= texImage->Height);
   assert(zoffset + depth <= texImage->Depth);

   /* 'dims' controls how the unpack state (e.g. GL_UNPACK_SKIP_IMAGES)
    * is interpreted by the PBO validation and texstore calls. */
   switch (target) {
   case GL_TEXTURE_1D:
      dims = 1;
      break;
   case GL_TEXTURE_2D_ARRAY:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_3D:
      dims = 3;
      break;
   default:
      dims = 2;
   }

   /* get pointer to src pixels (may be in a pbo which we'll map here) */
   src = (const GLubyte *)
      _mesa_validate_pbo_teximage(ctx, dims, width, height, depth,
                                  format, type, pixels, packing, caller);
   if (!src)
      return;

   /* compute slice info (and do some sanity checks) */
   switch (target) {
   case GL_TEXTURE_2D:
   case GL_TEXTURE_RECTANGLE:
   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_EXTERNAL_OES:
      /* one image slice, nothing special needs to be done */
      break;
   case GL_TEXTURE_1D:
      assert(height == 1);
      assert(depth == 1);
      assert(yoffset == 0);
      assert(zoffset == 0);
      break;
   case GL_TEXTURE_1D_ARRAY:
      assert(depth == 1);
      assert(zoffset == 0);
      /* Each array layer lives along the Y axis: iterate 'height' slices
       * of height 1, starting at layer 'yoffset'. */
      numSlices = height;
      sliceOffset = yoffset;
      height = 1;
      yoffset = 0;
      srcImageStride = _mesa_image_row_stride(packing, width, format, type);
      break;
   case GL_TEXTURE_2D_ARRAY:
      /* Layers live along the Z axis: one 2D slice per layer. */
      numSlices = depth;
      sliceOffset = zoffset;
      depth = 1;
      zoffset = 0;
      srcImageStride = _mesa_image_image_stride(packing, width, height,
                                                format, type);
      break;
   case GL_TEXTURE_3D:
      /* we'll store 3D images as a series of slices */
      numSlices = depth;
      sliceOffset = zoffset;
      srcImageStride = _mesa_image_image_stride(packing, width, height,
                                                format, type);
      break;
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      numSlices = depth;
      sliceOffset = zoffset;
      srcImageStride = _mesa_image_image_stride(packing, width, height,
                                                format, type);
      break;
   default:
      _mesa_warning(ctx, "Unexpected target 0x%x in store_texsubimage()",
                    target);
      return;
   }

   assert(numSlices == 1 || srcImageStride != 0);

   for (slice = 0; slice < numSlices; slice++) {
      GLubyte *dstMap;
      GLint dstRowStride;

      ctx->Driver.MapTextureImage(ctx, texImage,
                                  slice + sliceOffset,
                                  xoffset, yoffset, width, height,
                                  mapMode, &dstMap, &dstRowStride);
      if (dstMap) {
         /* Note: we're only storing a 2D (or 1D) slice at a time but we need
          * to pass the right 'dims' value so that GL_UNPACK_SKIP_IMAGES is
          * used for 3D images.
          */
         success = _mesa_texstore(ctx, dims, texImage->_BaseFormat,
                                  texImage->TexFormat,
                                  dstRowStride,
                                  &dstMap,
                                  width, height, 1,  /* w, h, d */
                                  format, type, src, packing);

         ctx->Driver.UnmapTextureImage(ctx, texImage, slice + sliceOffset);
      }

      src += srcImageStride;

      /* A map or store failure aborts the remaining slices. */
      if (!success)
         break;
   }

   if (!success)
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "%s", caller);

   _mesa_unmap_teximage_pbo(ctx, packing);
}