/**
 * Map or unmap all the renderbuffers which we may need during
 * software rendering.
 * XXX in the future, we could probably convey extra information to
 * reduce the number of mappings needed.  I.e. if doing a glReadPixels
 * from the depth buffer, we really only need one mapping.
 *
 * XXX Rewrite this function someday.
 * We can probably just loop over all the renderbuffer attachments,
 * map/unmap all of them, and not worry about the _ColorDrawBuffers
 * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
 *
 * \param intel  the intel rendering context
 * \param fb     framebuffer whose buffers get (un)mapped
 * \param map    GL_TRUE to map everything, GL_FALSE to unmap
 */
static void
intel_map_unmap_framebuffer(struct intel_context *intel,
                            struct gl_framebuffer *fb,
                            GLboolean map)
{
   GLuint i;

   /* color draw buffers */
   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
      if (map)
         intel_renderbuffer_map(intel, fb->_ColorDrawBuffers[i]);
      else
         intel_renderbuffer_unmap(intel, fb->_ColorDrawBuffers[i]);
   }

   /* color read buffer */
   if (map)
      intel_renderbuffer_map(intel, fb->_ColorReadBuffer);
   else
      intel_renderbuffer_unmap(intel, fb->_ColorReadBuffer);

   /* check for render to textures */
   for (i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer_attachment *att = fb->Attachment + i;
      struct gl_texture_object *tex = att->Texture;
      if (tex) {
         /* render to texture: (un)map all of the texture's images */
         ASSERT(att->Renderbuffer);
         if (map)
            intel_tex_map_images(intel, intel_texture_object(tex));
         else
            intel_tex_unmap_images(intel, intel_texture_object(tex));
      }
   }

   /* depth buffer (Note wrapper!  The actual renderbuffer is behind
    * _DepthBuffer->Wrapped.)
    */
   if (fb->_DepthBuffer) {
      if (map)
         intel_renderbuffer_map(intel, fb->_DepthBuffer->Wrapped);
      else
         intel_renderbuffer_unmap(intel, fb->_DepthBuffer->Wrapped);
   }

   /* stencil buffer (Note wrapper!) */
   if (fb->_StencilBuffer) {
      if (map)
         intel_renderbuffer_map(intel, fb->_StencilBuffer->Wrapped);
      else
         intel_renderbuffer_unmap(intel, fb->_StencilBuffer->Wrapped);
   }

   /* Runs on both the map and unmap path. */
   intel_check_front_buffer_rendering(intel);
}
/**
 * Called when done software rendering.  Unmap the buffers we mapped in
 * the above function (intelSpanRenderStart).
 */
void
intelSpanRenderFinish(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   _swrast_flush(ctx);

   /* Now unmap the framebuffer:
    */
#if 0
   intel_region_unmap(intel, intel->front_region);
   intel_region_unmap(intel, intel->back_region);
   intel_region_unmap(intel, intel->intelScreen->depth_region);
#endif

   /* Unmap every texture unit that intelSpanRenderStart() mapped. */
   for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_unmap_images(intel, intel_texture_object(texObj));
      }
   }

   /* GL_FALSE => unmap the read/draw framebuffers' renderbuffers */
   intel_map_unmap_buffers(intel, GL_FALSE);

   /* Releases the lock taken in intelSpanRenderStart(). */
   UNLOCK_HARDWARE(intel);
}
/**
 * Prepare for software rendering.  Map current read/draw framebuffers'
 * renderbuffers and all currently bound texture objects.
 *
 * Old note: Moved locking out to get reasonable span performance.
 *
 * The hardware lock taken here is released in intelSpanRenderFinish().
 */
void
intelSpanRenderStart(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   /* Finish pending GPU rendering before the CPU touches the buffers. */
   intelFinish(&intel->ctx);
   LOCK_HARDWARE(intel);

#if 0
   /* Just map the framebuffer and all textures.  Bufmgr code will
    * take care of waiting on the necessary fences:
    */
   intel_region_map(intel->intelScreen, intel->front_region);
   intel_region_map(intel->intelScreen, intel->back_region);
   intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
#endif

   /* Map the images of every enabled texture unit for CPU access. */
   for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_map_images(intel, intel_texture_object(texObj));
      }
   }

   /* GL_TRUE => map the read/draw framebuffers' renderbuffers */
   intel_map_unmap_buffers(intel, GL_TRUE);
}
/**
 * Attach an existing miptree to a texture image, replacing whatever
 * storage the image had before.  Both the image and its texture object
 * take a reference on \p mt.
 */
static void
intel_set_texture_image_mt(struct brw_context *brw,
                           struct gl_texture_image *image,
                           GLenum internal_format,
                           struct intel_mipmap_tree *mt)
{
   struct gl_texture_object *tex_obj = image->TexObject;
   struct intel_texture_object *itex_obj = intel_texture_object(tex_obj);
   struct intel_texture_image *itex_image = intel_texture_image(image);

   /* Re-initialize the core teximage fields from the miptree geometry. */
   _mesa_init_teximage_fields(&brw->ctx, image,
                              mt->logical_width0, mt->logical_height0,
                              1, 0, internal_format, mt->format);

   /* Discard any storage the image previously owned. */
   brw->ctx.Driver.FreeTextureImageBuffer(&brw->ctx, image);

   itex_obj->needs_validate = true;

   assert(mt->pitch % mt->cpp == 0);
   itex_image->base.RowStride = mt->pitch / mt->cpp;

   intel_miptree_reference(&itex_image->mt, mt);

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&itex_obj->mt, mt);
}
/**
 * Bind a region to a texture image as though the data had been uploaded
 * with glTexImage2D().
 *
 * Used for GLX_EXT_texture_from_pixmap and the EGL image extensions.
 */
static void
intel_set_texture_image_region(struct gl_context *ctx,
                               struct gl_texture_image *image,
                               struct intel_region *region,
                               GLenum target,
                               GLenum internalFormat,
                               gl_format format)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = image->TexObject;
   struct intel_texture_object *itex_obj = intel_texture_object(tex_obj);
   struct intel_texture_image *itex_image = intel_texture_image(image);

   _mesa_init_teximage_fields(&intel->ctx, image,
                              region->width, region->height, 1,
                              0, internalFormat, format);

   /* Drop any storage the image previously owned. */
   ctx->Driver.FreeTextureImageBuffer(ctx, image);

   /* Wrap the caller's region in a single-level miptree. */
   itex_image->mt = intel_miptree_create_for_region(intel, target,
                                                    image->TexFormat,
                                                    region);
   if (itex_image->mt == NULL)
      return;

   itex_image->base.RowStride = region->pitch;

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&itex_obj->mt, itex_image->mt);
}
/* * \brief Resolve buffers before drawing. * * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each * enabled depth texture. * * (In the future, this will also perform MSAA resolves). */ static void brw_predraw_resolve_buffers(struct brw_context *brw) { struct gl_context *ctx = &brw->ctx; struct intel_renderbuffer *depth_irb; struct intel_texture_object *tex_obj; /* Resolve the depth buffer's HiZ buffer. */ depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH); if (depth_irb) intel_renderbuffer_resolve_hiz(brw, depth_irb); /* Resolve depth buffer of each enabled depth texture, and color buffer of * each fast-clear-enabled color texture. */ for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) { if (!ctx->Texture.Unit[i]._ReallyEnabled) continue; tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current); if (!tex_obj || !tex_obj->mt) continue; intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt); intel_miptree_resolve_color(brw, tex_obj->mt); } }
/**
 * Generate new mipmap data from BASE+1 to BASE+p (the minimally-sized mipmap
 * level).
 *
 * The texture object's miptree must be mapped.
 *
 * It would be really nice if this was just called by Mesa whenever mipmaps
 * needed to be regenerated, rather than us having to remember to do so in
 * each texture image modification path.
 *
 * This function should also include an accelerated path.
 */
void
intel_generate_mipmap(GLcontext *ctx, GLenum target,
                      struct gl_texture_object *texObj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intelObj = intel_texture_object(texObj);
   GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
   int face, i;

   /* The actual generation happens in the software fallback. */
   _mesa_generate_mipmap(ctx, target, texObj);

   /* Update the level information in our private data in the new images, since
    * it didn't get set as part of a normal TexImage path.
    */
   for (face = 0; face < nr_faces; face++) {
      /* NOTE(review): the loop bound is i < MaxLevel, so the image at
       * MaxLevel itself is never touched here — confirm this matches the
       * set of levels _mesa_generate_mipmap() actually writes.
       */
      for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
         struct intel_texture_image *intelImage;

         intelImage = intel_texture_image(texObj->Image[face][i]);
         if (intelImage == NULL)
            break;

         intelImage->level = i;
         intelImage->face = face;
         /* Unreference the miptree to signal that the new Data is a bare
          * pointer from mesa.
          */
         intel_miptree_release(intel, &intelImage->mt);
      }
   }
}
/**
 * Binds a region to a texture image, like it was uploaded by glTexImage2D().
 *
 * Used for GLX_EXT_texture_from_pixmap and EGL image extensions.
 *
 * \param offset   byte offset of the image within the region's BO
 * \param width/height  dimensions of the bound sub-image
 * \param tile_x/tile_y intra-tile offsets recorded in the miptree's
 *                      level-0 slice
 */
static void
intel_set_texture_image_region(struct gl_context *ctx,
                               struct gl_texture_image *image,
                               struct intel_region *region,
                               GLenum target,
                               GLenum internalFormat,
                               gl_format format,
                               uint32_t offset,
                               GLuint width,
                               GLuint height,
                               GLuint tile_x,
                               GLuint tile_y)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct gl_texture_object *texobj = image->TexObject;
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
   /* Never set to true below, so any non-zero draw offset is rejected. */
   bool has_surface_tile_offset = false;
   uint32_t draw_x, draw_y;

   _mesa_init_teximage_fields(&intel->ctx, image,
                              width, height, 1,
                              0, internalFormat, format);

   ctx->Driver.FreeTextureImageBuffer(ctx, image);

   /* Build a single-level miptree layout and attach the caller's region
    * instead of allocating fresh storage.
    */
   intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
                                                 0, 0,
                                                 width, height, 1,
                                                 true);
   if (intel_image->mt == NULL)
       return;
   intel_region_reference(&intel_image->mt->region, region);
   intel_image->mt->total_width = width;
   intel_image->mt->total_height = height;
   intel_image->mt->level[0].slice[0].x_offset = tile_x;
   intel_image->mt->level[0].slice[0].y_offset = tile_y;

   intel_miptree_get_tile_offsets(intel_image->mt, 0, 0, &draw_x, &draw_y);

   /* From "OES_EGL_image" error reporting.  We report GL_INVALID_OPERATION
    * for EGL images from non-tile-aligned surfaces in gen4 hw and earlier,
    * which have trouble resolving back to the destination image due to
    * alignment issues.
    */
   if (!has_surface_tile_offset &&
       (draw_x != 0 || draw_y != 0)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
      intel_miptree_release(&intel_image->mt);
      return;
   }

   intel_texobj->needs_validate = true;

   intel_image->mt->offset = offset;
   assert(region->pitch % region->cpp == 0);
   intel_image->base.RowStride = region->pitch / region->cpp;

   /* Immediately validate the image to the object. */
   intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
}
static GLboolean intel_alloc_texture_image_buffer(struct gl_context *ctx, struct gl_texture_image *image) { struct intel_context *intel = intel_context(ctx); struct intel_texture_image *intel_image = intel_texture_image(image); struct gl_texture_object *texobj = image->TexObject; struct intel_texture_object *intel_texobj = intel_texture_object(texobj); GLuint slices; assert(image->Border == 0); /* Because the driver uses AllocTextureImageBuffer() internally, it may end * up mismatched with FreeTextureImageBuffer(), but that is safe to call * multiple times. */ ctx->Driver.FreeTextureImageBuffer(ctx, image); /* Allocate the swrast_texture_image::ImageOffsets array now */ switch (texobj->Target) { case GL_TEXTURE_3D: case GL_TEXTURE_2D_ARRAY: slices = image->Depth; break; case GL_TEXTURE_1D_ARRAY: slices = image->Height; break; default: slices = 1; } assert(!intel_image->base.ImageOffsets); intel_image->base.ImageOffsets = malloc(slices * sizeof(GLuint)); _swrast_init_texture_image(image); if (intel_texobj->mt && intel_miptree_match_image(intel_texobj->mt, image)) { intel_miptree_reference(&intel_image->mt, intel_texobj->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n", __FUNCTION__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_texobj->mt); } else { intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj, intel_image, false); /* Even if the object currently has a mipmap tree associated * with it, this one is a more likely candidate to represent the * whole object since our level didn't fit what was there * before, and any lower levels would fit into our miptree. */ intel_miptree_reference(&intel_texobj->mt, intel_image->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n", __FUNCTION__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_image->mt); } return true; }
/** * \brief Prepare for entry into glBegin/glEnd block. * * Resolve buffers before entering a glBegin/glEnd block. This is * necessary to prevent recursive calls to FLUSH_VERTICES. * * This resolves the depth buffer of each enabled depth texture and the HiZ * buffer of the attached depth renderbuffer. * * Details * ------- * When vertices are queued during a glBegin/glEnd block, those vertices must * be drawn before any rendering state changes. To ensure this, Mesa calls * FLUSH_VERTICES as a prehook to such state changes. Therefore, * FLUSH_VERTICES itself cannot change rendering state without falling into a * recursive trap. * * This precludes meta-ops, namely buffer resolves, from occurring while any * vertices are queued. To prevent that situation, we resolve some buffers on * entering a glBegin/glEnd * * \see brwCleanupExecEnd() */ static void brwPrepareExecBegin(struct gl_context *ctx) { struct brw_context *brw = brw_context(ctx); struct intel_context *intel = &brw->intel; struct intel_renderbuffer *draw_irb; struct intel_texture_object *tex_obj; if (!intel->has_hiz) { /* The context uses no feature that requires buffer resolves. */ return; } /* Resolve each enabled texture. */ for (int i = 0; i < ctx->Const.MaxTextureImageUnits; i++) { if (!ctx->Texture.Unit[i]._ReallyEnabled) continue; tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current); if (!tex_obj || !tex_obj->mt) continue; intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt); } /* Resolve the attached depth buffer. */ draw_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH); if (draw_irb) { intel_renderbuffer_resolve_hiz(intel, draw_irb); } }
/**
 * ctx->Driver.TexParameter() hook: note texture parameter changes that may
 * invalidate the object's miptree.
 */
static void
intelTexParameter(GLcontext *ctx, GLenum target,
                  struct gl_texture_object *texObj,
                  GLenum pname, const GLfloat *params)
{
   struct intel_texture_object *intelObj = intel_texture_object(texObj);

   /* Anything which can affect the calculation of firstLevel and lastLevel
    * may invalidate the miptree, so flag the object dirty for those pnames.
    */
   if (pname == GL_TEXTURE_MIN_FILTER ||
       pname == GL_TEXTURE_MAG_FILTER ||
       pname == GL_TEXTURE_BASE_LEVEL ||
       pname == GL_TEXTURE_MAX_LEVEL ||
       pname == GL_TEXTURE_MIN_LOD ||
       pname == GL_TEXTURE_MAX_LOD) {
      intelObj->dirty |= 1;
   }
}
static GLboolean intel_bind_renderbuffer_tex_image(struct gl_context *ctx, struct gl_renderbuffer *rb, struct gl_texture_image *image) { struct intel_renderbuffer *irb = intel_renderbuffer(rb); struct intel_texture_image *intel_image = intel_texture_image(image); struct gl_texture_object *texobj = image->TexObject; struct intel_texture_object *intel_texobj = intel_texture_object(texobj); /* We can only handle RB allocated with AllocRenderbufferStorage, or * window-system renderbuffers. */ assert(!rb->TexImage); if (!irb->mt) return false; _mesa_lock_texture(ctx, texobj); _mesa_init_teximage_fields(ctx, image, rb->Width, rb->Height, 1, 0, rb->InternalFormat, rb->Format); image->NumSamples = rb->NumSamples; intel_miptree_reference(&intel_image->mt, irb->mt); /* Immediately validate the image to the object. */ intel_miptree_reference(&intel_texobj->mt, intel_image->mt); intel_texobj->needs_validate = true; _mesa_unlock_texture(ctx, texobj); return true; }
static GLboolean intel_texture_view(struct gl_context *ctx, struct gl_texture_object *texObj, struct gl_texture_object *origTexObj) { struct brw_context *brw = brw_context(ctx); struct intel_texture_object *intel_tex = intel_texture_object(texObj); struct intel_texture_object *intel_orig_tex = intel_texture_object(origTexObj); assert(intel_orig_tex->mt); intel_miptree_reference(&intel_tex->mt, intel_orig_tex->mt); /* Since we can only make views of immutable-format textures, * we can assume that everything is in origTexObj's miptree. * * Mesa core has already made us a copy of all the teximage objects, * except it hasn't copied our mt pointers, etc. */ const int numFaces = _mesa_num_tex_faces(texObj->Target); const int numLevels = texObj->NumLevels; int face; int level; for (face = 0; face < numFaces; face++) { for (level = 0; level < numLevels; level++) { struct gl_texture_image *image = texObj->Image[face][level]; struct intel_texture_image *intel_image = intel_texture_image(image); intel_miptree_reference(&intel_image->mt, intel_orig_tex->mt); } } /* The miptree is in a validated state, so no need to check later. */ intel_tex->needs_validate = false; intel_tex->validated_first_level = 0; intel_tex->validated_last_level = numLevels - 1; /* Set the validated texture format, with the same adjustments that * would have been applied to determine the underlying texture's * mt->format. */ intel_tex->_Format = intel_depth_format_for_depthstencil_format( intel_lower_compressed_format(brw, texObj->Image[0][0]->TexFormat)); return GL_TRUE; }
/**
 * Create a __DRIimage referencing one level (and, for 3D/cube maps, one
 * slice/face) of an existing GL texture.
 *
 * \param zoffset  slice index for 3D textures, face index for cube maps
 * \param error    out: one of the __DRI_IMAGE_ERROR_* codes
 * \return the new image, or NULL on error (with *error set)
 *
 * Fixes: the 3D zoffset validation used (Depth < zoffset), which is off by
 * one — it accepted zoffset == Depth even though valid slices are
 * 0..Depth-1 — and never rejected negative zoffsets.
 */
static __DRIimage *
intel_create_image_from_texture(__DRIcontext *context, int target,
                                unsigned texture, int zoffset,
                                int level,
                                unsigned *error,
                                void *loaderPrivate)
{
   __DRIimage *image;
   struct brw_context *brw = context->driverPrivate;
   struct gl_texture_object *obj;
   struct intel_texture_object *iobj;
   GLuint face = 0;

   obj = _mesa_lookup_texture(&brw->ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_CUBE_MAP)
      face = zoffset;

   _mesa_test_texobj_completeness(&brw->ctx, obj);
   iobj = intel_texture_object(obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (level < obj->BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   /* A 3D slice index must lie in [0, Depth-1]. */
   if (target == GL_TEXTURE_3D &&
       (zoffset < 0 || zoffset >= obj->Image[face][level]->Depth)) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   image = calloc(1, sizeof *image);
   if (image == NULL) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   image->internal_format = obj->Image[face][level]->InternalFormat;
   image->format = obj->Image[face][level]->TexFormat;
   image->data = loaderPrivate;
   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
   image->dri_format = driGLFormatToImageFormat(image->format);
   image->has_depthstencil = iobj->mt->stencil_mt ? true : false;
   if (image->dri_format == MESA_FORMAT_NONE) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      free(image);
      return NULL;
   }

   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return image;
}
/**
 * Point a texture object's storage at a range of a GL buffer object
 * instead of allocating new miptree storage.
 *
 * \param buffer_offset  byte offset of the image data within buffer_obj
 * \param row_stride     byte stride between rows of the image
 * \param read_only      if false, the texture may be rendered to, which
 *                       imposes extra alignment and format restrictions
 * \return false if the buffer can't be used directly (caller falls back
 *         to a CPU mapping path), true on success
 */
static bool
intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,
                                            struct gl_texture_object *tex_obj,
                                            struct gl_buffer_object *buffer_obj,
                                            uint32_t buffer_offset,
                                            uint32_t row_stride,
                                            bool read_only)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(tex_obj);
   struct gl_texture_image *image = tex_obj->Image[0][0];
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_buffer_object *intel_buffer_obj = intel_buffer_object(buffer_obj);

   if (!read_only) {
      /* Renderbuffers have the restriction that the buffer offset and
       * surface pitch must be a multiple of the element size.  If it's
       * not, we have to fail and fall back to software.
       */
      int cpp = _mesa_get_format_bytes(image->TexFormat);
      if (buffer_offset % cpp || row_stride % cpp) {
         perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
         return false;
      }

      if (!brw->format_supported_as_render_target[image->TexFormat]) {
         perf_debug("Non-renderable PBO format; fallback to CPU mapping\n");
         return false;
      }
   }

   assert(intel_texobj->mt == NULL);

   /* Wrap the buffer object's BO in a single-level miptree. */
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
                                             buffer_offset,
                                             row_stride * image->Height);
   intel_texobj->mt = intel_miptree_create_for_bo(brw, bo,
                                                  image->TexFormat,
                                                  buffer_offset,
                                                  image->Width,
                                                  image->Height,
                                                  image->Depth,
                                                  row_stride,
                                                  0);
   if (!intel_texobj->mt)
      return false;

   if (!_swrast_init_texture_image(image))
      return false;

   intel_miptree_reference(&intel_image->mt, intel_texobj->mt);

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = 0;
   intel_texobj->_Format = intel_texobj->mt->format;

   return true;
}
/**
 * ctx->Driver.AllocTextureStorage() handler.
 *
 * Compare this to _mesa_alloc_texture_storage, which would call into
 * intel_alloc_texture_image_buffer() above.
 *
 * Allocates one miptree covering all faces and levels up front
 * (glTexStorage-style immutable storage), then points every
 * gl_texture_image at it.
 */
static GLboolean
intel_alloc_texture_storage(struct gl_context *ctx,
                            struct gl_texture_object *texobj,
                            GLsizei levels, GLsizei width,
                            GLsizei height, GLsizei depth)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
   struct gl_texture_image *first_image = texobj->Image[0][0];
   /* Clamp the requested sample count to something the hardware supports. */
   int num_samples = intel_quantize_num_samples(brw->intelScreen,
                                                first_image->NumSamples);
   const int numFaces = _mesa_num_tex_faces(texobj->Target);
   int face;
   int level;

   /* If the object's current miptree doesn't match what we need, make a new
    * one.
    */
   if (!intel_texobj->mt ||
       !intel_miptree_match_image(intel_texobj->mt, first_image) ||
       intel_texobj->mt->last_level != levels - 1) {
      intel_miptree_release(&intel_texobj->mt);
      intel_texobj->mt = intel_miptree_create(brw, texobj->Target,
                                              first_image->TexFormat,
                                              0, levels - 1,
                                              width, height, depth,
                                              false, /* expect_accelerated */
                                              num_samples,
                                              INTEL_MIPTREE_TILING_ANY,
                                              false);

      if (intel_texobj->mt == NULL) {
         return false;
      }
   }

   /* Point every face/level image at the shared miptree. */
   for (face = 0; face < numFaces; face++) {
      for (level = 0; level < levels; level++) {
         struct gl_texture_image *image = texobj->Image[face][level];
         struct intel_texture_image *intel_image = intel_texture_image(image);

         image->NumSamples = num_samples;

         _swrast_free_texture_image_buffer(ctx, image);
         if (!_swrast_init_texture_image(image))
            return false;

         intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
      }
   }

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = levels - 1;
   intel_texobj->_Format = intel_texobj->mt->format;
   return true;
}
/**
 * ctx->Driver.DeleteTexture() hook: drop the driver's miptree reference
 * before handing the object back to core Mesa for destruction.
 */
static void
intelDeleteTextureObject(struct gl_context *ctx,
                         struct gl_texture_object *texObj)
{
   struct intel_texture_object *intel_obj = intel_texture_object(texObj);

   intel_miptree_release(&intel_obj->mt);
   _mesa_delete_texture_object(ctx, texObj);
}
/**
 * ctx->Driver.GenerateMipmap() hook: map the base-level images around the
 * software mipmap generation, then unmap them again.
 */
static void
intelGenerateMipmap(GLcontext *ctx, GLenum target,
                    struct gl_texture_object *texObj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intel_obj = intel_texture_object(texObj);
   const GLuint base = texObj->BaseLevel;

   intel_tex_map_level_images(intel, intel_obj, base);
   intel_generate_mipmap(ctx, target, texObj);
   intel_tex_unmap_level_images(intel, intel_obj, base);
}
static GLboolean intel_alloc_texture_image_buffer(struct gl_context *ctx, struct gl_texture_image *image) { struct brw_context *brw = brw_context(ctx); struct intel_texture_image *intel_image = intel_texture_image(image); struct gl_texture_object *texobj = image->TexObject; struct intel_texture_object *intel_texobj = intel_texture_object(texobj); assert(image->Border == 0); /* Quantize sample count */ if (image->NumSamples) { image->NumSamples = intel_quantize_num_samples(brw->screen, image->NumSamples); if (!image->NumSamples) return false; } /* Because the driver uses AllocTextureImageBuffer() internally, it may end * up mismatched with FreeTextureImageBuffer(), but that is safe to call * multiple times. */ ctx->Driver.FreeTextureImageBuffer(ctx, image); if (!_swrast_init_texture_image(image)) return false; if (intel_texobj->mt && intel_miptree_match_image(intel_texobj->mt, image)) { intel_miptree_reference(&intel_image->mt, intel_texobj->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n", __func__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_texobj->mt); } else { intel_image->mt = intel_miptree_create_for_teximage(brw, intel_texobj, intel_image, MIPTREE_CREATE_DEFAULT); if (!intel_image->mt) return false; /* Even if the object currently has a mipmap tree associated * with it, this one is a more likely candidate to represent the * whole object since our level didn't fit what was there * before, and any lower levels would fit into our miptree. */ intel_miptree_reference(&intel_texobj->mt, intel_image->mt); DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n", __func__, texobj, image->Level, image->Width, image->Height, image->Depth, intel_image->mt); } intel_texobj->needs_validate = true; return true; }
/**
 * Bind the front-left renderbuffer of a DRI drawable to the currently
 * bound texture (GLX_EXT_texture_from_pixmap-style binding).
 *
 * \param texture_format  __DRI_TEXTURE_FORMAT_RGB or _RGBA
 */
void
intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
                   GLint texture_format, __DRIdrawable *dPriv)
{
   struct gl_framebuffer *fb = dPriv->driverPrivate;
   struct intel_context *intel = pDRICtx->driverPrivate;
   struct gl_context *ctx = &intel->ctx;
   struct intel_texture_object *intelObj;
   struct intel_renderbuffer *rb;
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   int level = 0, internalFormat = 0;
   gl_format texFormat = MESA_FORMAT_NONE;

   texObj = _mesa_get_current_tex_object(ctx, target);
   intelObj = intel_texture_object(texObj);

   if (!intelObj)
      return;

   /* Refresh the renderbuffers unless the loader guarantees invalidate
    * events and the stamps show nothing changed.
    */
   if (dPriv->lastStamp != dPriv->dri2.stamp ||
       !pDRICtx->driScreenPriv->dri2.useInvalidate)
      intel_update_renderbuffers(pDRICtx, dPriv);

   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   /* If the region isn't set, then intel_update_renderbuffers was unable
    * to get the buffers for the drawable.
    */
   if (!rb || !rb->mt)
      return;

   /* Pick a texture format matching the buffer's bytes-per-pixel.
    * NOTE(review): if cpp is neither 4 nor 2, internalFormat stays 0 and
    * texFormat stays MESA_FORMAT_NONE yet both are still passed through
    * below — confirm window-system buffers are always 16bpp or 32bpp here.
    */
   if (rb->mt->cpp == 4) {
      if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
         internalFormat = GL_RGB;
         texFormat = MESA_FORMAT_XRGB8888;
      }
      else {
         internalFormat = GL_RGBA;
         texFormat = MESA_FORMAT_ARGB8888;
      }
   } else if (rb->mt->cpp == 2) {
      internalFormat = GL_RGB;
      texFormat = MESA_FORMAT_RGB565;
   }

   _mesa_lock_texture(&intel->ctx, texObj);
   texImage = _mesa_get_tex_image(ctx, texObj, target, level);
   intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
                                  internalFormat, texFormat, 0,
                                  rb->mt->region->width,
                                  rb->mt->region->height,
                                  0, 0);
   _mesa_unlock_texture(&intel->ctx, texObj);
}
/**
 * ctx->Driver.DeleteTexture() hook: free the driver's miptree (if any)
 * before core Mesa destroys the texture object itself.
 */
static void
intel_delete_texture_object(GLcontext *ctx,
                            struct gl_texture_object *texObj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intel_obj = intel_texture_object(texObj);

   if (intel_obj->mt != NULL)
      intel_miptree_destroy(intel, intel_obj->mt);

   _mesa_delete_texture_object(ctx, texObj);
}
/**
 * GL_APPLE_object_purgeable hook: make a texture's storage unpurgeable.
 *
 * \return GL_UNDEFINED_APPLE when the object has no backing storage yet,
 *         otherwise the result of pinning the underlying buffer.
 */
static GLenum
intel_texture_object_unpurgeable(GLcontext * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel_obj = intel_texture_object(obj);

   /* Nothing to pin if there is no miptree/region behind the object. */
   if (intel_obj->mt == NULL || intel_obj->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(ctx, intel_obj->mt->region->buffer,
                                   option);
}
/**
 * glEGLImageTargetTexture2DOES handler: bind an EGLImage's storage to a
 * texture image.
 *
 * Fixes: the GL_INVALID_OPERATION error message opened a parenthesis
 * ("...2DOES(external target...") that was never closed.
 */
static void
intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
                              struct gl_texture_object *texObj,
                              struct gl_texture_image *texImage,
                              GLeglImageOES image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_mipmap_tree *mt;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   /* Resolve the EGLImage handle through the loader. */
   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   /* We support external textures only for EGLImages created with
    * EGL_EXT_image_dma_buf_import.  We may lift that restriction in the
    * future.
    */
   if (target == GL_TEXTURE_EXTERNAL_OES && !image->dma_buf_imported) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetTexture2DOES(external target is enabled only "
                  "for images created with EGL_EXT_image_dma_buf_import)");
      return;
   }

   /* Disallow depth/stencil textures: we don't have a way to pass the
    * separate stencil miptree of a GL_DEPTH_STENCIL texture through.
    */
   if (image->has_depthstencil) {
      _mesa_error(ctx, GL_INVALID_OPERATION, __func__);
      return;
   }

   if (image->planar_format && image->planar_format->nplanes > 0)
      mt = create_mt_for_planar_dri_image(brw, target, image);
   else
      mt = create_mt_for_dri_image(brw, target, image);
   if (mt == NULL)
      return;

   struct intel_texture_object *intel_texobj = intel_texture_object(texObj);
   intel_texobj->planar_format = image->planar_format;

   const GLenum internal_format =
      image->internal_format != 0 ?
      image->internal_format : _mesa_get_format_base_format(mt->format);
   intel_set_texture_image_mt(brw, texImage, internal_format, mt);

   /* intel_set_texture_image_mt() took its own references; drop ours. */
   intel_miptree_release(&mt);
}
/**
 * ctx->Driver.IsTextureResident() hook.
 *
 * Residency tracking is not implemented, so every texture is reported
 * resident.  (A real implementation would check whether the object's
 * miptree region is currently resident.)
 */
static GLboolean
intelIsTextureResident(GLcontext *ctx, struct gl_texture_object *texObj)
{
   (void) ctx;
   (void) texObj;
   return GL_TRUE;
}
/**
 * ctx->Driver.CompressedTexImage2D() hook: store the image via swrast,
 * then mark the level and face dirty so the miptree gets revalidated.
 */
static void
intelCompressedTexImage2D(GLcontext *ctx, GLenum target, GLint level,
                          GLint internalFormat,
                          GLint width, GLint height, GLint border,
                          GLsizei imageSize, const GLvoid *data,
                          struct gl_texture_object *texObj,
                          struct gl_texture_image *texImage)
{
   struct intel_texture_object *intel_obj = intel_texture_object(texObj);
   const GLuint face = target_to_face(target);

   _mesa_store_compressed_teximage2d(ctx, target, level, internalFormat,
                                     width, height, border,
                                     imageSize, data, texObj, texImage);

   intel_obj->dirty_images[face] |= (1 << level);
   intel_obj->dirty |= 1 << face;
}
/**
 * GL_APPLE_object_purgeable hook: mark a texture's storage purgeable.
 *
 * \return GL_RELEASED_APPLE when the object has no backing storage yet,
 *         otherwise the result of marking the underlying BO purgeable.
 */
static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel_obj = intel_texture_object(obj);

   (void) ctx;
   (void) option;

   /* No storage yet means nothing left to release. */
   if (intel_obj->mt == NULL || intel_obj->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel_obj->mt->region->bo);
}
/**
 * ctx->Driver.TexImage1D() hook: store the image via swrast, then flag
 * the level dirty on face 0 (1D textures have a single face).
 */
static void
intelTexImage1D(GLcontext *ctx, GLenum target, GLint level,
                GLint internalFormat,
                GLint width, GLint border,
                GLenum format, GLenum type, const GLvoid *pixels,
                const struct gl_pixelstore_attrib *packing,
                struct gl_texture_object *texObj,
                struct gl_texture_image *texImage)
{
   struct intel_texture_object *intel_obj = intel_texture_object(texObj);

   _mesa_store_teximage1d(ctx, target, level, internalFormat,
                          width, border, format, type,
                          pixels, packing, texObj, texImage);

   intel_obj->dirty_images[0] |= (1 << level);
   intel_obj->dirty |= 1;
}
/**
 * Fill in the SURFACE_STATE structure for the texture bound to the given
 * texture unit.
 *
 * \param unit  texture unit index
 * \param surf  out: surface state words, zeroed and then populated here
 */
static void
brw_update_texture_surface( GLcontext *ctx,
                            GLuint unit,
                            struct brw_surface_state *surf )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = brw->attribs.Texture->Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];

   memset(surf, 0, sizeof(*surf));

   surf->ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
   surf->ss0.surface_type = translate_tex_target(tObj->Target);
   surf->ss0.surface_format = translate_tex_format(firstImage->TexFormat->MesaFormat,
                                                   tObj->DepthMode);

   /* This is ok for all textures with channel width 8bit or less:
    */
/*    surf->ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */

   /* BRW_NEW_LOCK */
   surf->ss1.base_addr = bmBufferOffset(intel, intelObj->mt->region->buffer);

   /* Mip count is relative to the base (firstLevel) map. */
   surf->ss2.mip_count = intelObj->lastLevel - intelObj->firstLevel;
   /* Hardware size fields store (size - 1). */
   surf->ss2.width = firstImage->Width - 1;
   surf->ss2.height = firstImage->Height - 1;
   surf->ss3.tile_walk = BRW_TILEWALK_XMAJOR;
   surf->ss3.tiled_surface = intelObj->mt->region->tiled; /* always zero */
   /* Pitch field holds (pitch * cpp - 1) — presumably mt->pitch counts
    * pixels and the hardware wants bytes minus one; TODO confirm.
    */
   surf->ss3.pitch = (intelObj->mt->pitch * intelObj->mt->cpp) - 1;
   surf->ss3.depth = firstImage->Depth - 1;

   surf->ss4.min_lod = 0;

   /* Enable all six faces for cube maps. */
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      surf->ss0.cube_pos_x = 1;
      surf->ss0.cube_pos_y = 1;
      surf->ss0.cube_pos_z = 1;
      surf->ss0.cube_neg_x = 1;
      surf->ss0.cube_neg_y = 1;
      surf->ss0.cube_neg_z = 1;
   }
}
void intel_unmap_vertex_shader_textures(struct gl_context *ctx) { struct intel_context *intel = intel_context(ctx); int i; if (ctx->VertexProgram._Current == NULL) return; for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) { if (ctx->Texture.Unit[i]._ReallyEnabled && ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) { struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current; intel_tex_unmap_images(intel, intel_texture_object(texObj)); } } }
static void intelTexSubImage3D( GLcontext *ctx, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels, const struct gl_pixelstore_attrib *packing, struct gl_texture_object *texObj, struct gl_texture_image *texImage ) { struct intel_texture_object *intelObj = intel_texture_object(texObj); _mesa_store_texsubimage3d(ctx, target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels, packing, texObj, texImage); intelObj->dirty_images[0] |= (1 << level); intelObj->dirty |= 1 << 0; }