static GLboolean intel_alloc_texture_image_buffer(struct gl_context *ctx, struct gl_texture_image *image) { struct intel_context *intel = intel_context(ctx); struct intel_texture_image *intel_image = intel_texture_image(image); struct gl_texture_object *texobj = image->TexObject; struct intel_texture_object *intel_texobj = intel_texture_object(texobj); GLuint slices; assert(image->Border == 0); /* Because the driver uses AllocTextureImageBuffer() internally, it may end * up mismatched with FreeTextureImageBuffer(), but that is safe to call * multiple times. */ ctx->Driver.FreeTextureImageBuffer(ctx, image); /* Allocate the swrast_texture_image::ImageOffsets array now */ switch (texobj->Target) { case GL_TEXTURE_3D: case GL_TEXTURE_2D_ARRAY: slices = image->Depth; break; case GL_TEXTURE_1D_ARRAY: slices = image->Height; break; default: slices = 1; } assert(!intel_image->base.ImageOffsets); intel_image->base.ImageOffsets = malloc(slices * sizeof(GLuint)); _swrast_init_texture_image(image); if (intel_texobj->mt && intel_miptree_match_image(intel_texobj->mt, image)) { intel_miptree_reference(&intel_image->mt, intel_texobj->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n", __FUNCTION__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_texobj->mt); } else { intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj, intel_image, false); /* Even if the object currently has a mipmap tree associated * with it, this one is a more likely candidate to represent the * whole object since our level didn't fit what was there * before, and any lower levels would fit into our miptree. */ intel_miptree_reference(&intel_texobj->mt, intel_image->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n", __FUNCTION__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_image->mt); } return true; }
static GLboolean intel_bind_renderbuffer_tex_image(struct gl_context *ctx, struct gl_renderbuffer *rb, struct gl_texture_image *image) { struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_texture_image *intel_image = intel_texture_image(image); struct gl_texture_object *texobj = image->TexObject; struct intel_texture_object *intel_texobj = intel_texture_object(texobj); /* We can only handle RB allocated with AllocRenderbufferStorage, or * window-system renderbuffers. */ assert(!rb->TexImage); if (!irb->mt) return false; _mesa_lock_texture(ctx, texobj); _mesa_init_teximage_fields(ctx, image, rb->Width, rb->Height, 1, 0, rb->InternalFormat, rb->Format); image->NumSamples = rb->NumSamples; intel_miptree_reference(&intel_image->mt, irb->mt); /* Immediately validate the image to the object. */ intel_miptree_reference(&intel_texobj->mt, intel_image->mt); intel_texobj->needs_validate = true; _mesa_unlock_texture(ctx, texobj); return true; }
/* Point a texture image at an existing miptree, resetting the image's core
 * Mesa fields to describe the miptree's base level and validating it into
 * the owning texture object.
 */
static void
intel_set_texture_image_mt(struct brw_context *brw,
                           struct gl_texture_image *image,
                           GLenum internal_format,
                           struct intel_mipmap_tree *mt)
{
   struct gl_texture_object *tex_obj = image->TexObject;
   struct intel_texture_object *itex_obj = intel_texture_object(tex_obj);
   struct intel_texture_image *tex_img = intel_texture_image(image);

   _mesa_init_teximage_fields(&brw->ctx, image,
                              mt->logical_width0, mt->logical_height0,
                              1, 0, internal_format, mt->format);

   /* Drop any storage the image previously owned before re-pointing it. */
   brw->ctx.Driver.FreeTextureImageBuffer(&brw->ctx, image);

   itex_obj->needs_validate = true;

   assert(mt->pitch % mt->cpp == 0);
   tex_img->base.RowStride = mt->pitch / mt->cpp;

   intel_miptree_reference(&tex_img->mt, mt);

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&itex_obj->mt, mt);
}
/* ctx->Driver.AllocTextureImageBuffer() handler (brw variant): attach a
 * miptree to the given texture image, reusing the texture object's miptree
 * when the image fits and creating a new one otherwise.
 *
 * Returns false on allocation failure or an unsupportable sample count.
 */
static GLboolean
intel_alloc_texture_image_buffer(struct gl_context *ctx,
                                 struct gl_texture_image *image)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct gl_texture_object *texobj = image->TexObject;
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);

   assert(image->Border == 0);

   /* Quantize sample count: a zero result from intel_quantize_num_samples()
    * means the requested count cannot be supported, so fail the allocation.
    */
   if (image->NumSamples) {
      image->NumSamples = intel_quantize_num_samples(brw->screen,
                                                     image->NumSamples);
      if (!image->NumSamples)
         return false;
   }

   /* Because the driver uses AllocTextureImageBuffer() internally, it may end
    * up mismatched with FreeTextureImageBuffer(), but that is safe to call
    * multiple times.
    */
   ctx->Driver.FreeTextureImageBuffer(ctx, image);

   if (!_swrast_init_texture_image(image))
      return false;

   if (intel_texobj->mt &&
       intel_miptree_match_image(intel_texobj->mt, image)) {
      /* The image fits in the object's existing miptree: share it. */
      intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
      DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n",
          __func__, texobj, image->Level,
          image->Width, image->Height, image->Depth, intel_texobj->mt);
   } else {
      intel_image->mt = intel_miptree_create_for_teximage(brw, intel_texobj,
                                                          intel_image,
                                                          MIPTREE_CREATE_DEFAULT);
      if (!intel_image->mt)
         return false;

      /* Even if the object currently has a mipmap tree associated
       * with it, this one is a more likely candidate to represent the
       * whole object since our level didn't fit what was there
       * before, and any lower levels would fit into our miptree.
       */
      intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
      DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n",
          __func__, texobj, image->Level,
          image->Width, image->Height, image->Depth, intel_image->mt);
   }

   intel_texobj->needs_validate = true;

   return true;
}
/* Move a texture-backed renderbuffer's contents into a freshly allocated
 * single-level miptree (e.g. to satisfy hardware alignment workarounds).
 * If \c invalidate is set the old contents are not copied.
 */
void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_miptree_get_dimensions_for_image(rb->TexImage,
                                          &width, &height, &depth);

   /* NOTE(review): new_mt is dereferenced below without a NULL check;
    * intel_miptree_create() failure would crash here — confirm whether
    * allocation failure is considered impossible on this path.
    */
   new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 true,
                                 irb->mt->num_samples,
                                 INTEL_MIPTREE_TILING_ANY);

   if (brw_is_hiz_depth_format(brw, new_mt->format)) {
      intel_miptree_alloc_hiz(brw, new_mt);
   }

   /* Re-point the texture image (and its object) at the new miptree,
    * copying the old data unless the caller is about to clear it.
    */
   intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);

   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   /* Drop the local reference; the image/irb references keep it alive. */
   intel_miptree_release(&new_mt);
}
/**
 * Binds a region to a texture image, like it was uploaded by glTexImage2D().
 *
 * Used for GLX_EXT_texture_from_pixmap and EGL image extensions.
 */
static void
intel_set_texture_image_region(struct gl_context *ctx,
                               struct gl_texture_image *image,
                               struct intel_region *region,
                               GLenum target,
                               GLenum internalFormat,
                               gl_format format,
                               uint32_t offset,
                               GLuint width,
                               GLuint height,
                               GLuint tile_x,
                               GLuint tile_y)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct gl_texture_object *texobj = image->TexObject;
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
   /* Hard-coded false here: any nonzero intra-tile offset below is treated
    * as unsupported and rejected.
    */
   bool has_surface_tile_offset = false;
   uint32_t draw_x, draw_y;

   _mesa_init_teximage_fields(&intel->ctx, image,
                              width, height, 1,
                              0, internalFormat, format);

   ctx->Driver.FreeTextureImageBuffer(ctx, image);

   /* Layout-only miptree; its storage comes from the referenced region. */
   intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
                                                 0, 0,
                                                 width, height, 1,
                                                 true);
   if (intel_image->mt == NULL)
       return;
   intel_region_reference(&intel_image->mt->region, region);
   intel_image->mt->total_width = width;
   intel_image->mt->total_height = height;
   intel_image->mt->level[0].slice[0].x_offset = tile_x;
   intel_image->mt->level[0].slice[0].y_offset = tile_y;

   intel_miptree_get_tile_offsets(intel_image->mt, 0, 0, &draw_x, &draw_y);

   /* From "OES_EGL_image" error reporting. We report GL_INVALID_OPERATION
    * for EGL images from non-tile aligned surfaces in gen4 hw and earlier which has
    * trouble resolving back to destination image due to alignment issues.
    */
   if (!has_surface_tile_offset &&
       (draw_x != 0 || draw_y != 0)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
      intel_miptree_release(&intel_image->mt);
      return;
   }

   intel_texobj->needs_validate = true;

   intel_image->mt->offset = offset;
   assert(region->pitch % region->cpp == 0);
   intel_image->base.RowStride = region->pitch / region->cpp;

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
}
/**
 * Attaches a region to a texture image as if the data had been uploaded
 * with glTexImage2D().
 *
 * Used by GLX_EXT_texture_from_pixmap and the EGL image extensions.
 */
static void
intel_set_texture_image_region(struct gl_context *ctx,
                               struct gl_texture_image *image,
                               struct intel_region *region,
                               GLenum target,
                               GLenum internalFormat,
                               gl_format format)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *tex_img = intel_texture_image(image);
   struct gl_texture_object *tex_obj = image->TexObject;
   struct intel_texture_object *itex_obj = intel_texture_object(tex_obj);

   _mesa_init_teximage_fields(&intel->ctx, image,
                              region->width, region->height, 1,
                              0, internalFormat, format);

   /* Free any storage the image previously owned. */
   ctx->Driver.FreeTextureImageBuffer(ctx, image);

   tex_img->mt = intel_miptree_create_for_region(intel, target,
                                                 image->TexFormat, region);
   if (tex_img->mt == NULL)
      return;

   tex_img->base.RowStride = region->pitch;

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&itex_obj->mt, tex_img->mt);
}
/**
 * Copies the image's contents at its level into the object's miptree,
 * and updates the image to point at the object's miptree.
 */
static void
copy_image_data_to_tree(struct intel_context *intel,
                        struct intel_texture_object *intelObj,
                        struct intel_texture_image *intelImage)
{
   struct intel_mipmap_tree *dst_mt = intelObj->mt;

   if (intelImage->mt) {
      /* The image already lives in its own miptree; copy (potentially
       * with the blitter) and drop the old tree.
       */
      intel_miptree_image_copy(intel, dst_mt,
                               intelImage->face,
                               intelImage->level, intelImage->mt);
      intel_miptree_release(intel, &intelImage->mt);
   } else {
      /* The image is held in malloc'd memory; upload it directly. */
      assert(intelImage->base.Data != NULL);
      intel_miptree_image_data(intel, dst_mt,
                               intelImage->face,
                               intelImage->level,
                               intelImage->base.Data,
                               intelImage->base.RowStride,
                               intelImage->base.RowStride *
                               intelImage->base.Height);
      _mesa_align_free(intelImage->base.Data);
      intelImage->base.Data = NULL;
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}
void intel_renderbuffer_move_to_temp(struct intel_context *intel, struct intel_renderbuffer *irb) { struct intel_texture_image *intel_image = intel_texture_image(irb->tex_image); struct intel_mipmap_tree *new_mt; int width, height, depth; intel_miptree_get_dimensions_for_image(irb->tex_image, &width, &height, &depth); new_mt = intel_miptree_create(intel, irb->tex_image->TexObject->Target, intel_image->base.Base.TexFormat, intel_image->base.Base.Level, intel_image->base.Base.Level, width, height, depth, true, irb->mt->num_samples, false /* force_y_tiling */); intel_miptree_copy_teximage(intel, intel_image, new_mt); intel_miptree_reference(&irb->mt, intel_image->mt); intel_renderbuffer_set_draw_offset(irb); intel_miptree_release(&new_mt); }
static GLboolean intel_texture_view(struct gl_context *ctx, struct gl_texture_object *texObj, struct gl_texture_object *origTexObj) { struct brw_context *brw = brw_context(ctx); struct intel_texture_object *intel_tex = intel_texture_object(texObj); struct intel_texture_object *intel_orig_tex = intel_texture_object(origTexObj); assert(intel_orig_tex->mt); intel_miptree_reference(&intel_tex->mt, intel_orig_tex->mt); /* Since we can only make views of immutable-format textures, * we can assume that everything is in origTexObj's miptree. * * Mesa core has already made us a copy of all the teximage objects, * except it hasn't copied our mt pointers, etc. */ const int numFaces = _mesa_num_tex_faces(texObj->Target); const int numLevels = texObj->NumLevels; int face; int level; for (face = 0; face < numFaces; face++) { for (level = 0; level < numLevels; level++) { struct gl_texture_image *image = texObj->Image[face][level]; struct intel_texture_image *intel_image = intel_texture_image(image); intel_miptree_reference(&intel_image->mt, intel_orig_tex->mt); } } /* The miptree is in a validated state, so no need to check later. */ intel_tex->needs_validate = false; intel_tex->validated_first_level = 0; intel_tex->validated_last_level = numLevels - 1; /* Set the validated texture format, with the same adjustments that * would have been applied to determine the underlying texture's * mt->format. */ intel_tex->_Format = intel_depth_format_for_depthstencil_format( intel_lower_compressed_format(brw, texObj->Image[0][0]->TexFormat)); return GL_TRUE; }
/* Back a texture's storage directly with a GL buffer object's BO at the
 * given offset and row stride.  Returns false to request the CPU-mapping
 * fallback (bad alignment, non-renderable format, or allocation failure).
 */
static bool
intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,
                                            struct gl_texture_object *tex_obj,
                                            struct gl_buffer_object *buffer_obj,
                                            uint32_t buffer_offset,
                                            uint32_t row_stride,
                                            bool read_only)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(tex_obj);
   struct gl_texture_image *image = tex_obj->Image[0][0];
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_buffer_object *intel_buffer_obj = intel_buffer_object(buffer_obj);

   if (!read_only) {
      /* Renderbuffers have the restriction that the buffer offset and
       * surface pitch must be a multiple of the element size. If it's
       * not, we have to fail and fall back to software.
       */
      int cpp = _mesa_get_format_bytes(image->TexFormat);
      if (buffer_offset % cpp || row_stride % cpp) {
         perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
         return false;
      }

      if (!brw->format_supported_as_render_target[image->TexFormat]) {
         perf_debug("Non-renderable PBO format; fallback to CPU mapping\n");
         return false;
      }
   }

   assert(intel_texobj->mt == NULL);

   /* Wrap the buffer object's BO in a single-level miptree. */
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
                                             buffer_offset,
                                             row_stride * image->Height);
   intel_texobj->mt = intel_miptree_create_for_bo(brw, bo,
                                                  image->TexFormat,
                                                  buffer_offset,
                                                  image->Width,
                                                  image->Height,
                                                  image->Depth,
                                                  row_stride,
                                                  0);
   if (!intel_texobj->mt)
      return false;

   if (!_swrast_init_texture_image(image))
      return false;

   intel_miptree_reference(&intel_image->mt, intel_texobj->mt);

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = 0;
   intel_texobj->_Format = intel_texobj->mt->format;

   return true;
}
/**
 * ctx->Driver.AllocTextureStorage() handler.
 *
 * Compare this to _mesa_alloc_texture_storage, which would call into
 * intel_alloc_texture_image_buffer() above.
 *
 * Creates (or reuses) one miptree covering all levels/faces and points
 * every gl_texture_image at it, then marks the object fully validated.
 */
static GLboolean
intel_alloc_texture_storage(struct gl_context *ctx,
                            struct gl_texture_object *texobj,
                            GLsizei levels, GLsizei width,
                            GLsizei height, GLsizei depth)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
   struct gl_texture_image *first_image = texobj->Image[0][0];
   int num_samples = intel_quantize_num_samples(brw->intelScreen,
                                                first_image->NumSamples);
   const int numFaces = _mesa_num_tex_faces(texobj->Target);
   int face;
   int level;

   /* If the object's current miptree doesn't match what we need, make a new
    * one.
    */
   if (!intel_texobj->mt ||
       !intel_miptree_match_image(intel_texobj->mt, first_image) ||
       intel_texobj->mt->last_level != levels - 1) {
      intel_miptree_release(&intel_texobj->mt);
      intel_texobj->mt = intel_miptree_create(brw, texobj->Target,
                                              first_image->TexFormat,
                                              0, levels - 1,
                                              width, height, depth,
                                              false, /* expect_accelerated */
                                              num_samples,
                                              INTEL_MIPTREE_TILING_ANY,
                                              false);

      if (intel_texobj->mt == NULL) {
         return false;
      }
   }

   /* Point every face/level image at the object's miptree. */
   for (face = 0; face < numFaces; face++) {
      for (level = 0; level < levels; level++) {
         struct gl_texture_image *image = texobj->Image[face][level];
         struct intel_texture_image *intel_image = intel_texture_image(image);

         image->NumSamples = num_samples;

         _swrast_free_texture_image_buffer(ctx, image);
         if (!_swrast_init_texture_image(image))
            return false;

         intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
      }
   }

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = levels - 1;
   intel_texobj->_Format = intel_texobj->mt->format;

   return true;
}
static bool intel_renderbuffer_update_wrapper(struct intel_context *intel, struct intel_renderbuffer *irb, struct gl_texture_image *image, uint32_t layer) { struct gl_renderbuffer *rb = &irb->Base.Base; struct intel_texture_image *intel_image = intel_texture_image(image); struct intel_mipmap_tree *mt = intel_image->mt; int level = image->Level; rb->Format = image->TexFormat; rb->InternalFormat = image->InternalFormat; rb->_BaseFormat = image->_BaseFormat; rb->NumSamples = mt->num_samples; if (mt->msaa_layout != INTEL_MSAA_LAYOUT_NONE) { assert(level == 0); rb->Width = mt->logical_width0; rb->Height = mt->logical_height0; } else { rb->Width = mt->level[level].width; rb->Height = mt->level[level].height; } rb->Delete = intel_delete_renderbuffer; rb->AllocStorage = intel_nop_alloc_storage; intel_miptree_check_level_layer(mt, level, layer); irb->mt_level = level; switch (mt->msaa_layout) { case INTEL_MSAA_LAYOUT_UMS: case INTEL_MSAA_LAYOUT_CMS: irb->mt_layer = layer * mt->num_samples; break; default: irb->mt_layer = layer; } intel_miptree_reference(&irb->mt, mt); intel_renderbuffer_set_draw_offset(irb); if (mt->hiz_mt == NULL && intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */); if (!mt->hiz_mt) return false; } return true; }
/**
 * Creates a new named renderbuffer that wraps the first slice
 * of an existing miptree.
 *
 * Clobbers the current renderbuffer binding (ctx->CurrentRenderbuffer).
 *
 * Returns the GL renderbuffer name; the caller owns it and must delete it.
 */
GLuint
brw_get_rb_for_slice(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     unsigned level, unsigned layer, bool flat)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint rbo;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   /* This turns the GenRenderbuffers name into an actual struct
    * intel_renderbuffer.
    */
   _mesa_GenRenderbuffers(1, &rbo);
   _mesa_BindRenderbuffer(GL_RENDERBUFFER, rbo);
   rb = ctx->CurrentRenderbuffer;
   irb = intel_renderbuffer(rb);

   rb->Format = mt->format;
   rb->_BaseFormat = _mesa_get_format_base_format(mt->format);

   /* Program takes care of msaa and mip-level access manually for stencil.
    * The surface is also treated as Y-tiled instead of as W-tiled calling for
    * twice the width and half the height in dimensions.
    */
   if (flat) {
      const unsigned halign_stencil = 8;

      rb->NumSamples = 0;
      rb->Width = ALIGN(mt->total_width, halign_stencil) * 2;
      rb->Height = (mt->total_height / mt->physical_depth0) / 2;
      irb->mt_level = 0;
   } else {
      rb->NumSamples = mt->num_samples;
      rb->Width = mt->logical_width0;
      rb->Height = mt->logical_height0;
      irb->mt_level = level;
   }

   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   return rbo;
}
static bool intel_renderbuffer_update_wrapper(struct brw_context *brw, struct intel_renderbuffer *irb, struct gl_texture_image *image, uint32_t layer) { struct gl_renderbuffer *rb = &irb->Base.Base; struct intel_texture_image *intel_image = intel_texture_image(image); struct intel_mipmap_tree *mt = intel_image->mt; int level = image->Level; rb->Depth = image->Depth; rb->AllocStorage = intel_nop_alloc_storage; intel_miptree_check_level_layer(mt, level, layer); irb->mt_level = level; switch (mt->msaa_layout) { case INTEL_MSAA_LAYOUT_UMS: case INTEL_MSAA_LAYOUT_CMS: irb->mt_layer = layer * mt->num_samples; break; default: irb->mt_layer = layer; } intel_miptree_reference(&irb->mt, mt); intel_renderbuffer_set_draw_offset(irb); if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) { intel_miptree_alloc_hiz(brw, mt); if (!mt->hiz_mt) return false; } return true; }
/** * Copies the image's current data to the given miptree, and associates that * miptree with the image. * * If \c invalidate is true, then the actual image data does not need to be * copied, but the image still needs to be associated to the new miptree (this * is set to true if we're about to clear the image). */ void intel_miptree_copy_teximage(struct intel_context *intel, struct intel_texture_image *intelImage, struct intel_mipmap_tree *dst_mt, bool invalidate) { struct intel_mipmap_tree *src_mt = intelImage->mt; struct intel_texture_object *intel_obj = intel_texture_object(intelImage->base.Base.TexObject); int level = intelImage->base.Base.Level; int face = intelImage->base.Base.Face; GLuint depth = intelImage->base.Base.Depth; if (!invalidate) { for (int slice = 0; slice < depth; slice++) { intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice); } } intel_miptree_reference(&intelImage->mt, dst_mt); intel_obj->needs_validate = true; }
static bool intel_renderbuffer_update_wrapper(struct intel_context *intel, struct intel_renderbuffer *irb, struct gl_texture_image *image, uint32_t layer) { struct gl_renderbuffer *rb = &irb->Base.Base; struct intel_texture_image *intel_image = intel_texture_image(image); struct intel_mipmap_tree *mt = intel_image->mt; int level = image->Level; rb->AllocStorage = intel_nop_alloc_storage; intel_miptree_check_level_layer(mt, level, layer); irb->mt_level = level; irb->mt_layer = layer; intel_miptree_reference(&irb->mt, mt); intel_renderbuffer_set_draw_offset(irb); return true; }
static bool intel_renderbuffer_update_wrapper(struct intel_context *intel, struct intel_renderbuffer *irb, struct gl_texture_image *image, uint32_t layer) { struct gl_renderbuffer *rb = &irb->Base.Base; struct intel_texture_image *intel_image = intel_texture_image(image); struct intel_mipmap_tree *mt = intel_image->mt; int level = image->Level; rb->Format = image->TexFormat; rb->InternalFormat = image->InternalFormat; rb->_BaseFormat = image->_BaseFormat; rb->Width = mt->level[level].width; rb->Height = mt->level[level].height; rb->Delete = intel_delete_renderbuffer; rb->AllocStorage = intel_nop_alloc_storage; intel_miptree_check_level_layer(mt, level, layer); irb->mt_level = level; irb->mt_layer = layer; intel_miptree_reference(&irb->mt, mt); intel_renderbuffer_set_draw_offset(irb); if (mt->hiz_mt == NULL && intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */); if (!mt->hiz_mt) return false; } return true; }
/* Validate texture unit \c unit's current texture object for rendering:
 * ensure a single miptree holds all active levels, creating one and pulling
 * in stray images if necessary.  Returns GL_FALSE on the border-texture
 * fallback path, GL_TRUE otherwise.
 */
GLuint
intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
{
   struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   int comp_byte = 0;
   int cpp;
   GLuint face, i;
   GLuint nr_faces = 0;
   struct intel_texture_image *firstImage;

   /* We know/require this is true by now:
    */
   assert(intelObj->base._Complete);

   /* What levels must the tree include at a minimum?
    */
   intel_calculate_first_last_level(intelObj);
   firstImage =
      intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);

   /* Fallback case: bordered textures aren't supported in a miptree. */
   if (firstImage->base.Border) {
      if (intelObj->mt) {
         intel_miptree_release(intel, &intelObj->mt);
      }
      return GL_FALSE;
   }

   /* If both firstImage and intelObj have a tree which can contain
    * all active images, favour firstImage.  Note that because of the
    * completeness requirement, we know that the image dimensions
    * will match.
    */
   if (firstImage->mt &&
       firstImage->mt != intelObj->mt &&
       firstImage->mt->first_level <= intelObj->firstLevel &&
       firstImage->mt->last_level >= intelObj->lastLevel) {

      if (intelObj->mt)
         intel_miptree_release(intel, &intelObj->mt);

      intel_miptree_reference(&intelObj->mt, firstImage->mt);
   }

   /* Derive bytes-per-texel (or per-block for compressed formats). */
   if (firstImage->base.IsCompressed) {
      comp_byte =
         intel_compressed_num_bytes(firstImage->base.TexFormat->MesaFormat);
      cpp = comp_byte;
   }
   else
      cpp = firstImage->base.TexFormat->TexelBytes;

   /* Check tree can hold all active levels.  Check tree matches
    * target, imageFormat, etc.
    *
    * XXX: For some layouts (eg i945?), the test might have to be
    * first_level == firstLevel, as the tree isn't valid except at the
    * original start level.  Hope to get around this by
    * programming minLod, maxLod, baseLevel into the hardware and
    * leaving the tree alone.
    */
   if (intelObj->mt &&
       (intelObj->mt->target != intelObj->base.Target ||
        intelObj->mt->internal_format != firstImage->base.InternalFormat ||
        intelObj->mt->first_level != intelObj->firstLevel ||
        intelObj->mt->last_level != intelObj->lastLevel ||
        intelObj->mt->width0 != firstImage->base.Width ||
        intelObj->mt->height0 != firstImage->base.Height ||
        intelObj->mt->depth0 != firstImage->base.Depth ||
        intelObj->mt->cpp != cpp ||
        intelObj->mt->compressed != firstImage->base.IsCompressed)) {
      intel_miptree_release(intel, &intelObj->mt);
   }

   /* May need to create a new tree:
    */
   if (!intelObj->mt) {
      intelObj->mt = intel_miptree_create(intel,
                                          intelObj->base.Target,
                                          firstImage->base.InternalFormat,
                                          intelObj->firstLevel,
                                          intelObj->lastLevel,
                                          firstImage->base.Width,
                                          firstImage->base.Height,
                                          firstImage->base.Depth,
                                          cpp, comp_byte, GL_TRUE);
   }

   /* Pull in any images not in the object's tree:
    */
   nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
   for (face = 0; face < nr_faces; face++) {
      for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
         struct intel_texture_image *intelImage =
            intel_texture_image(intelObj->base.Image[face][i]);

         /* Need to import images in main memory or held in other trees.
          */
         if (intelObj->mt != intelImage->mt) {
            copy_image_data_to_tree(intel, intelObj, intelImage);
         }
      }
   }

   return GL_TRUE;
}
/* Workhorse for glTexImage1D/2D/3D and glCompressedTexImage on the old DRI
 * path: chooses a hardware format, allocates or reuses a miptree for the
 * image, attempts PBO fast paths, then maps the destination (miptree or
 * malloc'd fallback memory) and copies the user data in.
 *
 * Called with pixels == NULL by intelCopyTexImage to set up storage only.
 */
static void
intelTexImage(GLcontext * ctx,
              GLint dims,
              GLenum target, GLint level,
              GLint internalFormat,
              GLint width, GLint height, GLint depth,
              GLint border,
              GLenum format, GLenum type, const void *pixels,
              const struct gl_pixelstore_attrib *unpack,
              struct gl_texture_object *texObj,
              struct gl_texture_image *texImage,
              GLsizei imageSize, int compressed)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intelObj = intel_texture_object(texObj);
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   GLint postConvWidth = width;
   GLint postConvHeight = height;
   GLint texelBytes, sizeInBytes;
   GLuint dstRowStride;

   DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);

   intelFlush(ctx);

   intelImage->face = target_to_face(target);
   intelImage->level = level;

   /* Convolution may change the effective image dimensions. */
   if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
      _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
                                         &postConvHeight);
   }

   /* choose the texture format */
   texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
                                                  format, type);

   _mesa_set_fetch_functions(texImage, dims);

   if (texImage->TexFormat->TexelBytes == 0) {
      /* must be a compressed format */
      texelBytes = 0;
      texImage->IsCompressed = GL_TRUE;
      texImage->CompressedSize =
         ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
                                           texImage->Height, texImage->Depth,
                                           texImage->TexFormat->MesaFormat);
   } else {
      texelBytes = texImage->TexFormat->TexelBytes;

      /* Minimum pitch of 32 bytes */
      if (postConvWidth * texelBytes < 32) {
	 postConvWidth = 32 / texelBytes;
	 texImage->RowStride = postConvWidth;
      }

      assert(texImage->RowStride == postConvWidth);
   }

   /* Release the reference to a potentially orphaned buffer.
    * Release any old malloced memory.
    */
   if (intelImage->mt) {
      intel_miptree_release(intel, &intelImage->mt);
      assert(!texImage->Data);
   }
   else if (texImage->Data) {
      _mesa_align_free(texImage->Data);
   }

   /* If this is the only texture image in the tree, could call
    * bmBufferData with NULL data to free the old block and avoid
    * waiting on any outstanding fences.
    */
   if (intelObj->mt &&
       intelObj->mt->first_level == level &&
       intelObj->mt->last_level == level &&
       intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
       !intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                  intelImage->face, intelImage->level)) {

      DBG("release it\n");
      intel_miptree_release(intel, &intelObj->mt);
      assert(!intelObj->mt);
   }

   if (!intelObj->mt) {
      guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
      if (!intelObj->mt) {
	 DBG("guess_and_alloc_mipmap_tree: failed\n");
      }
   }

   assert(!intelImage->mt);

   if (intelObj->mt &&
       intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                 intelImage->face, intelImage->level)) {

      intel_miptree_reference(&intelImage->mt, intelObj->mt);
      assert(intelImage->mt);
   }

   if (!intelImage->mt)
      DBG("XXX: Image did not fit into tree - storing in local memory!\n");

   /* PBO fastpaths:
    */
   if (dims <= 2 &&
       intelImage->mt &&
       intel_buffer_object(unpack->BufferObj) &&
       check_pbo_format(internalFormat, format,
                        type, intelImage->base.TexFormat)) {

      DBG("trying pbo upload\n");

      /* Attempt to texture directly from PBO data (zero copy upload).
       *
       * Currently disable as it can lead to worse as well as better
       * performance (in particular when intel_region_cow() is
       * required).
       */
      if (intelObj->mt == intelImage->mt &&
          intelObj->mt->first_level == level &&
          intelObj->mt->last_level == level) {

         if (try_pbo_zcopy(intel, intelImage, unpack,
                           internalFormat,
                           width, height, format, type, pixels)) {
            DBG("pbo zcopy upload succeeded\n");
            return;
         }
      }

      /* Otherwise, attempt to use the blitter for PBO image uploads.
       */
      if (try_pbo_upload(intel, intelImage, unpack,
                         internalFormat,
                         width, height, format, type, pixels)) {
         DBG("pbo upload succeeded\n");
         return;
      }

      DBG("pbo upload failed\n");
   }

   /* intelCopyTexImage calls this function with pixels == NULL, with
    * the expectation that the mipmap tree will be set up but nothing
    * more will be done.  This is where those calls return:
    */
   if (compressed) {
      pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
						      unpack,
						      "glCompressedTexImage");
   } else {
      pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
					   format, type,
					   pixels, unpack, "glTexImage");
   }
   if (!pixels)
      return;

   /* Wait for the miptree's region to be idle before mapping it. */
   if (intelImage->mt)
      intel_region_idle(intel->intelScreen, intelImage->mt->region);

   LOCK_HARDWARE(intel);

   if (intelImage->mt) {
      texImage->Data = intel_miptree_image_map(intel,
                                               intelImage->mt,
                                               intelImage->face,
                                               intelImage->level,
                                               &dstRowStride,
                                               intelImage->base.ImageOffsets);
   }
   else {
      /* Allocate regular memory and store the image there temporarily.   */
      if (texImage->IsCompressed) {
         sizeInBytes = texImage->CompressedSize;
         dstRowStride =
            _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
         assert(dims != 3);
      }
      else {
         dstRowStride = postConvWidth * texelBytes;
         sizeInBytes = depth * dstRowStride * postConvHeight;
      }

      texImage->Data = malloc(sizeInBytes);
   }

   DBG("Upload image %dx%dx%d row_len %x "
       "pitch %x\n",
       width, height, depth, width * texelBytes, dstRowStride);

   /* Copy data.  Would like to know when it's ok for us to eg. use
    * the blitter to copy.  Or, use the hardware to do the format
    * conversion and copy:
    */
   if (compressed) {
      memcpy(texImage->Data, pixels, imageSize);
   }
   else if (!texImage->TexFormat->StoreImage(ctx, dims,
                                             texImage->_BaseFormat,
                                             texImage->TexFormat,
                                             texImage->Data,
                                             0, 0, 0, /* dstX/Y/Zoffset */
                                             dstRowStride,
                                             texImage->ImageOffsets,
                                             width, height, depth,
                                             format, type, pixels, unpack)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
   }

   _mesa_unmap_teximage_pbo(ctx, unpack);

   if (intelImage->mt) {
      intel_miptree_image_unmap(intel, intelImage->mt);
      texImage->Data = NULL;
   }

   UNLOCK_HARDWARE(intel);

#if 0
   /* GL_SGIS_generate_mipmap -- this can be accelerated now.
    */
   if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
      intel_generate_mipmap(ctx, target,
                            &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
                            texObj);
   }
#endif
}
/**
 * Work around depth/stencil buffer alignment restrictions on gen4-6.
 *
 * The hardware's intra-tile depth coordinate offsets can only express
 * 8-pixel-aligned values (and original gen4, pre-G45, has no intra-tile
 * offsets at all), so any depth or stencil renderbuffer whose draw offset
 * within its miptree violates that is blitted ("rebased") into a temporary
 * single-level miptree via intel_renderbuffer_move_to_temp().  Because
 * packed depth/stencil attachments may share one miptree, rebasing one of
 * the two buffers can force the other one to be rebased too so that their
 * tile offsets stay in agreement.
 *
 * The resulting (truncated) tile offsets and the computed depth/hiz/stencil
 * surface offsets are saved in brw->depthstencil so the state-emit code can
 * use them without re-deriving anything.
 */
void
brw_workaround_depthstencil_alignment(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool rebase_depth = false;
   bool rebase_stencil = false;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
   uint32_t stencil_draw_x = 0, stencil_draw_y = 0;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   uint32_t tile_mask_x, tile_mask_y;
   brw_get_depthstencil_tile_masks(depth_mt, stencil_mt,
                                   &tile_mask_x, &tile_mask_y);

   if (depth_irb) {
      tile_x = depth_irb->draw_x & tile_mask_x;
      tile_y = depth_irb->draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       */
      if (tile_x & 7 || tile_y & 7)
         rebase_depth = true;

      /* We didn't even have intra-tile offsets before g45. */
      if (intel->gen == 4 && !intel->is_g4x) {
         if (tile_x || tile_y)
            rebase_depth = true;
      }

      if (rebase_depth) {
         perf_debug("HW workaround: blitting depth level %d to a temporary "
                    "to fix alignment (depth tile offset %d,%d)\n",
                    depth_irb->mt_level, tile_x, tile_y);

         intel_renderbuffer_move_to_temp(intel, depth_irb);

         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }

         stencil_mt = get_stencil_miptree(stencil_irb);

         /* The move changed depth_irb's draw offsets; recompute. */
         tile_x = depth_irb->draw_x & tile_mask_x;
         tile_y = depth_irb->draw_y & tile_mask_y;
      }

      if (stencil_irb) {
         stencil_mt = get_stencil_miptree(stencil_irb);
         intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                        stencil_irb->mt_layer,
                                        &stencil_draw_x, &stencil_draw_y);

         /* NOTE: these locals intentionally shadow the function-scope
          * stencil_tile_x/y; only the stencil-only pass below writes the
          * outer copies.
          */
         int stencil_tile_x = stencil_draw_x & tile_mask_x;
         int stencil_tile_y = stencil_draw_y & tile_mask_y;

         /* If stencil doesn't match depth, then we'll need to rebase stencil
          * as well.  (if we hadn't decided to rebase stencil before, the
          * post-stencil depth test will also rebase depth to try to match it
          * up).
          */
         if (tile_x != stencil_tile_x ||
             tile_y != stencil_tile_y) {
            rebase_stencil = true;
         }
      }
   }

   /* If we have (just) stencil, check it for ignored low bits as well */
   if (stencil_irb) {
      intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      /* Same 8-pixel alignment requirement as for depth above. */
      if (stencil_tile_x & 7 || stencil_tile_y & 7)
         rebase_stencil = true;

      /* Pre-g45: no intra-tile offsets at all. */
      if (intel->gen == 4 && !intel->is_g4x) {
         if (stencil_tile_x || stencil_tile_y)
            rebase_stencil = true;
      }
   }

   if (rebase_stencil) {
      perf_debug("HW workaround: blitting stencil level %d to a temporary "
                 "to fix alignment (stencil tile offset %d,%d)\n",
                 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);

      intel_renderbuffer_move_to_temp(intel, stencil_irb);
      stencil_mt = get_stencil_miptree(stencil_irb);

      intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      if (depth_irb && depth_irb->mt == stencil_irb->mt) {
         /* Packed depth/stencil sharing one rb-level miptree: keep depth
          * pointing at the (rebased) shared tree and refresh its offsets.
          */
         intel_miptree_reference(&depth_irb->mt, stencil_irb->mt);
         intel_renderbuffer_set_draw_offset(depth_irb);
      } else if (depth_irb && !rebase_depth) {
         if (tile_x != stencil_tile_x || tile_y != stencil_tile_y) {
            /* Depth was fine on its own but no longer matches the rebased
             * stencil; move depth to a temporary as well so they line up.
             */
            perf_debug("HW workaround: blitting depth level %d to a temporary "
                       "to match stencil level %d alignment (depth tile offset "
                       "%d,%d, stencil offset %d,%d)\n",
                       depth_irb->mt_level, stencil_irb->mt_level,
                       tile_x, tile_y, stencil_tile_x, stencil_tile_y);

            intel_renderbuffer_move_to_temp(intel, depth_irb);

            tile_x = depth_irb->draw_x & tile_mask_x;
            tile_y = depth_irb->draw_y & tile_mask_y;

            if (stencil_irb && stencil_irb->mt == depth_mt) {
               intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
               intel_renderbuffer_set_draw_offset(stencil_irb);
            }

            WARN_ONCE(stencil_tile_x != tile_x ||
                      stencil_tile_y != tile_y,
                      "Rebased stencil tile offset (%d,%d) doesn't match depth "
                      "tile offset (%d,%d).\n",
                      stencil_tile_x, stencil_tile_y,
                      tile_x, tile_y);
         }
      }
   }

   /* Stencil-only FBO: the depth offsets fall back to stencil's. */
   if (!depth_irb) {
      tile_x = stencil_tile_x;
      tile_y = stencil_tile_y;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset, bad rendering may occur.\n");
   tile_x &= ~7;
   tile_y &= ~7;

   /* Now, after rebasing, save off the new depthstencil state so the hardware
    * packets can just dereference that without re-calculating tile offsets.
    */
   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = 0;
   brw->depthstencil.stencil_offset = 0;
   brw->depthstencil.hiz_offset = 0;
   brw->depthstencil.depth_mt = NULL;
   brw->depthstencil.stencil_mt = NULL;
   brw->depthstencil.hiz_mt = NULL;
   if (depth_irb) {
      depth_mt = depth_irb->mt;
      brw->depthstencil.depth_mt = depth_mt;
      brw->depthstencil.depth_offset =
         intel_region_get_aligned_offset(depth_mt->region,
                                         depth_irb->draw_x & ~tile_mask_x,
                                         depth_irb->draw_y & ~tile_mask_y,
                                         false);
      if (depth_mt->hiz_mt) {
         brw->depthstencil.hiz_mt = depth_mt->hiz_mt;
         /* Note the Y offset is halved here — presumably because the hiz
          * surface is half-height relative to depth; confirm against the
          * hiz miptree layout.
          */
         brw->depthstencil.hiz_offset =
            intel_region_get_aligned_offset(depth_mt->region,
                                            depth_irb->draw_x & ~tile_mask_x,
                                            (depth_irb->draw_y & ~tile_mask_y) / 2,
                                            false);
      }
   }
   if (stencil_irb) {
      stencil_mt = get_stencil_miptree(stencil_irb);
      brw->depthstencil.stencil_mt = stencil_mt;
      if (stencil_mt->format == MESA_FORMAT_S8) {
         /* Note: we can't compute the stencil offset using
          * intel_region_get_aligned_offset(), because stencil_region claims
          * that the region is untiled even though it's W tiled.
          */
         brw->depthstencil.stencil_offset =
            (stencil_draw_y & ~tile_mask_y) * stencil_mt->region->pitch +
            (stencil_draw_x & ~tile_mask_x) * 64;
      }
   }
}
/**
 * Work around depth/stencil buffer alignment restrictions (gen4-6 only).
 *
 * Same scheme as the older variant: renderbuffers whose intra-tile draw
 * offsets violate the hardware's 8-pixel alignment requirement (or any
 * nonzero offset on parts without surface tile offsets) are blitted into
 * temporary miptrees via intel_renderbuffer_move_to_temp(), keeping packed
 * depth/stencil pairs in agreement.  Results land in brw->depthstencil.
 *
 * \param clear_mask  BUFFER_BIT_DEPTH / BUFFER_BIT_STENCIL bits indicate the
 *                    caller is about to clear that buffer, so its current
 *                    contents need not be copied when moving to a temporary
 *                    (passed through as the "invalidate" argument).
 */
void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool rebase_depth = false;
   bool rebase_stencil = false;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
   uint32_t stencil_draw_x = 0, stencil_draw_y = 0;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;
   brw->depthstencil.stencil_offset = 0;
   brw->depthstencil.hiz_offset = 0;
   brw->depthstencil.depth_mt = NULL;
   brw->depthstencil.stencil_mt = NULL;
   if (depth_irb)
      brw->depthstencil.depth_mt = depth_mt;
   if (stencil_irb)
      brw->depthstencil.stencil_mt = get_stencil_miptree(stencil_irb);

   /* Gen7+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (brw->gen >= 7)
      return;

   /* Check if depth buffer is in depth/stencil format.  If so, then it's only
    * safe to invalidate it if we're also clearing stencil, and both depth_irb
    * and stencil_irb point to the same miptree.
    *
    * Note: it's not sufficient to check for the case where
    * _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL,
    * because this fails to catch depth/stencil buffers on hardware that uses
    * separate stencil.  To catch that case, we check whether
    * depth_mt->stencil_mt is non-NULL.
    */
   if (depth_irb && invalidate_depth &&
       (_mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL ||
        depth_mt->stencil_mt)) {
      invalidate_depth = invalidate_stencil && depth_irb && stencil_irb
         && depth_irb->mt == stencil_irb->mt;
   }

   uint32_t tile_mask_x, tile_mask_y;
   brw_get_depthstencil_tile_masks(depth_mt,
                                   depth_mt ? depth_irb->mt_level : 0,
                                   depth_mt ? depth_irb->mt_layer : 0,
                                   stencil_mt,
                                   &tile_mask_x, &tile_mask_y);

   if (depth_irb) {
      tile_x = depth_irb->draw_x & tile_mask_x;
      tile_y = depth_irb->draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       */
      if (tile_x & 7 || tile_y & 7)
         rebase_depth = true;

      /* We didn't even have intra-tile offsets before g45. */
      if (!brw->has_surface_tile_offset) {
         if (tile_x || tile_y)
            rebase_depth = true;
      }

      if (rebase_depth) {
         perf_debug("HW workaround: blitting depth level %d to a temporary "
                    "to fix alignment (depth tile offset %d,%d)\n",
                    depth_irb->mt_level, tile_x, tile_y);

         intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);

         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }

         stencil_mt = get_stencil_miptree(stencil_irb);

         /* The move changed depth_irb's draw offsets; recompute. */
         tile_x = depth_irb->draw_x & tile_mask_x;
         tile_y = depth_irb->draw_y & tile_mask_y;
      }

      if (stencil_irb) {
         stencil_mt = get_stencil_miptree(stencil_irb);
         intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                        stencil_irb->mt_layer,
                                        &stencil_draw_x, &stencil_draw_y);

         /* NOTE: these locals intentionally shadow the function-scope
          * stencil_tile_x/y; only the stencil-only pass below writes the
          * outer copies.
          */
         int stencil_tile_x = stencil_draw_x & tile_mask_x;
         int stencil_tile_y = stencil_draw_y & tile_mask_y;

         /* If stencil doesn't match depth, then we'll need to rebase stencil
          * as well.  (if we hadn't decided to rebase stencil before, the
          * post-stencil depth test will also rebase depth to try to match it
          * up).
          */
         if (tile_x != stencil_tile_x ||
             tile_y != stencil_tile_y) {
            rebase_stencil = true;
         }
      }
   }

   /* If we have (just) stencil, check it for ignored low bits as well */
   if (stencil_irb) {
      intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      /* Same 8-pixel alignment requirement as for depth above. */
      if (stencil_tile_x & 7 || stencil_tile_y & 7)
         rebase_stencil = true;

      /* Parts without surface tile offsets: no nonzero offset allowed. */
      if (!brw->has_surface_tile_offset) {
         if (stencil_tile_x || stencil_tile_y)
            rebase_stencil = true;
      }
   }

   if (rebase_stencil) {
      perf_debug("HW workaround: blitting stencil level %d to a temporary "
                 "to fix alignment (stencil tile offset %d,%d)\n",
                 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);

      intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
      stencil_mt = get_stencil_miptree(stencil_irb);

      intel_miptree_get_image_offset(stencil_mt, stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      if (depth_irb && depth_irb->mt == stencil_irb->mt) {
         /* Packed depth/stencil sharing one rb-level miptree: keep depth
          * pointing at the (rebased) shared tree and refresh its offsets.
          */
         intel_miptree_reference(&depth_irb->mt, stencil_irb->mt);
         intel_renderbuffer_set_draw_offset(depth_irb);
      } else if (depth_irb && !rebase_depth) {
         if (tile_x != stencil_tile_x || tile_y != stencil_tile_y) {
            /* Depth was fine on its own but no longer matches the rebased
             * stencil; move depth to a temporary as well so they line up.
             */
            perf_debug("HW workaround: blitting depth level %d to a temporary "
                       "to match stencil level %d alignment (depth tile offset "
                       "%d,%d, stencil offset %d,%d)\n",
                       depth_irb->mt_level, stencil_irb->mt_level,
                       tile_x, tile_y, stencil_tile_x, stencil_tile_y);

            intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);

            tile_x = depth_irb->draw_x & tile_mask_x;
            tile_y = depth_irb->draw_y & tile_mask_y;

            if (stencil_irb && stencil_irb->mt == depth_mt) {
               intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
               intel_renderbuffer_set_draw_offset(stencil_irb);
            }

            WARN_ONCE(stencil_tile_x != tile_x ||
                      stencil_tile_y != tile_y,
                      "Rebased stencil tile offset (%d,%d) doesn't match depth "
                      "tile offset (%d,%d).\n",
                      stencil_tile_x, stencil_tile_y,
                      tile_x, tile_y);
         }
      }
   }

   /* Stencil-only FBO: the depth offsets fall back to stencil's. */
   if (!depth_irb) {
      tile_x = stencil_tile_x;
      tile_y = stencil_tile_y;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset, bad rendering may occur.\n");
   tile_x &= ~7;
   tile_y &= ~7;

   /* Now, after rebasing, save off the new depthstencil state so the hardware
    * packets can just dereference that without re-calculating tile offsets.
    */
   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   if (depth_irb) {
      depth_mt = depth_irb->mt;
      brw->depthstencil.depth_mt = depth_mt;
      brw->depthstencil.depth_offset =
         intel_region_get_aligned_offset(depth_mt->region,
                                         depth_irb->draw_x & ~tile_mask_x,
                                         depth_irb->draw_y & ~tile_mask_y,
                                         false);
      if (intel_renderbuffer_has_hiz(depth_irb)) {
         /* Note the Y offset is halved here — presumably because the hiz
          * surface is half-height relative to depth; confirm against the
          * hiz miptree layout.
          */
         brw->depthstencil.hiz_offset =
            intel_region_get_aligned_offset(depth_mt->region,
                                            depth_irb->draw_x & ~tile_mask_x,
                                            (depth_irb->draw_y & ~tile_mask_y) / 2,
                                            false);
      }
   }
   if (stencil_irb) {
      stencil_mt = get_stencil_miptree(stencil_irb);
      brw->depthstencil.stencil_mt = stencil_mt;
      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
         /* Note: we can't compute the stencil offset using
          * intel_region_get_aligned_offset(), because stencil_region claims
          * that the region is untiled even though it's W tiled.
          */
         brw->depthstencil.stencil_offset =
            (stencil_draw_y & ~tile_mask_y) * stencil_mt->region->pitch +
            (stencil_draw_x & ~tile_mask_x) * 64;
      }
   }
}
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 *
 * Wraps the attached texture image in an intel_renderbuffer so the hardware
 * can draw into it, falling back to _swrast_render_texture() whenever no
 * miptree exists or the wrapper can't be set up.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   /* A cube face and a Z slice are both addressed as a single layer index
    * into the miptree; the two attachment fields are mutually exclusive.
    */
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

      irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
      /* Wrapper setup failed; drop the attachment and let swrast handle it. */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      /* Single-level tree: first and last level are both image's Level. */
      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      /* NOTE(review): this assumes intel_miptree_copy_teximage() re-points
       * intel_image->mt at the new tree, so referencing intel_image->mt and
       * releasing our creation reference on new_mt leaves irb->mt on the
       * single-level copy — confirm in intel_miptree_copy_teximage().
       */
      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}