/**
 * Map a whole texture image so swrast can access it through
 * intel_image->base.Map / ImageOffsets / RowStride.
 *
 * Any pending HiZ depth data is resolved for every slice of the level
 * before mapping, so swrast sees up-to-date texels.
 *
 * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
 *             (currently unused by this function — the mapping is the
 *             same either way; kept for interface symmetry)
 */
static void
intel_tex_map_image_for_swrast(struct intel_context *intel,
                               struct intel_texture_image *intel_image,
                               GLbitfield mode)
{
   int level;
   int face;
   struct intel_mipmap_tree *mt;
   unsigned int x, y;

   if (!intel_image || !intel_image->mt)
      return;

   level = intel_image->base.Base.Level;
   face = intel_image->base.Base.Face;
   mt = intel_image->mt;

   /* Resolve depth for every slice of this level before CPU access. */
   for (int i = 0; i < mt->level[level].depth; i++)
      intel_miptree_slice_resolve_depth(intel, mt, level, i);

   if (mt->target == GL_TEXTURE_3D ||
       mt->target == GL_TEXTURE_2D_ARRAY ||
       mt->target == GL_TEXTURE_1D_ARRAY) {
      int i;

      /* ImageOffsets[] is only used for swrast's fetch_texel_3d, so we can't
       * share code with the normal path.
       */
      for (i = 0; i < mt->level[level].depth; i++) {
         intel_miptree_get_image_offset(mt, level, i, &x, &y);
         /* Offset is expressed in texels, hence the pitch/cpp divide. */
         intel_image->base.ImageOffsets[i] =
            x + y * (mt->region->pitch / mt->region->cpp);
      }

      DBG("%s \n", __func__);

      /* Map points at the base of the whole tree; per-slice offsets are
       * applied by swrast via ImageOffsets[].
       */
      intel_image->base.Map = intel_miptree_map_raw(intel, mt);
   } else {
      /* 1D/2D/cube: bake the single image's offset into Map directly. */
      assert(intel_image->base.Base.Depth == 1);
      intel_miptree_get_image_offset(mt, level, face, &x, &y);

      DBG("%s: (%d,%d) -> (%d, %d)/%d\n",
          __func__, face, level, x, y, mt->region->pitch);

      intel_image->base.Map = intel_miptree_map_raw(intel, mt) +
         x * mt->cpp + y * mt->region->pitch;
   }

   /* RowStride is in texels, so the pitch must divide evenly by cpp. */
   assert(mt->region->pitch % mt->region->cpp == 0);
   intel_image->base.RowStride = mt->region->pitch / mt->region->cpp;
}
static void intel_miptree_map_blit(struct intel_context *intel, struct intel_mipmap_tree *mt, struct intel_miptree_map *map, unsigned int level, unsigned int slice) { map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format, 0, 0, map->w, map->h, 1, false, INTEL_MIPTREE_TILING_NONE); if (!map->mt) { fprintf(stderr, "Failed to allocate blit temporary\n"); goto fail; } map->stride = map->mt->region->pitch; if (!intel_miptree_blit(intel, mt, level, slice, map->x, map->y, false, map->mt, 0, 0, 0, 0, false, map->w, map->h, GL_COPY)) { fprintf(stderr, "Failed to blit\n"); goto fail; } intel_batchbuffer_flush(intel); map->ptr = intel_miptree_map_raw(intel, map->mt); DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), level, slice, map->ptr, map->stride); return; fail: intel_miptree_release(&map->mt); map->ptr = NULL; map->stride = 0; }
static void intel_miptree_map_gtt(struct intel_context *intel, struct intel_mipmap_tree *mt, struct intel_miptree_map *map, unsigned int level, unsigned int slice) { unsigned int bw, bh; void *base; unsigned int image_x, image_y; int x = map->x; int y = map->y; /* For compressed formats, the stride is the number of bytes per * row of blocks. intel_miptree_get_image_offset() already does * the divide. */ _mesa_get_format_block_size(mt->format, &bw, &bh); assert(y % bh == 0); y /= bh; base = intel_miptree_map_raw(intel, mt) + mt->offset; if (base == NULL) map->ptr = NULL; else { /* Note that in the case of cube maps, the caller must have passed the * slice number referencing the face. */ intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); x += image_x; y += image_y; map->stride = mt->region->pitch; map->ptr = base + y * map->stride + x * mt->cpp; } DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, map->x, map->y, map->w, map->h, mt, _mesa_get_format_name(mt->format), x, y, map->ptr, map->stride); }
static bool intel_blit_texsubimage(struct gl_context * ctx, struct gl_texture_image *texImage, GLint xoffset, GLint yoffset, GLint width, GLint height, GLenum format, GLenum type, const void *pixels, const struct gl_pixelstore_attrib *packing) { struct intel_context *intel = intel_context(ctx); struct intel_texture_image *intelImage = intel_texture_image(texImage); /* Try to do a blit upload of the subimage if the texture is * currently busy. */ if (!intelImage->mt) return false; /* The blitter can't handle Y tiling */ if (intelImage->mt->region->tiling == I915_TILING_Y) return false; if (texImage->TexObject->Target != GL_TEXTURE_2D) return false; if (!drm_intel_bo_busy(intelImage->mt->region->bo)) return false; DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n", __func__, _mesa_enum_to_string(texImage->TexObject->Target), texImage->Level, xoffset, yoffset, width, height); pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1, format, type, pixels, packing, "glTexSubImage"); if (!pixels) return false; struct intel_mipmap_tree *temp_mt = intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat, 0, 0, width, height, 1, false, INTEL_MIPTREE_TILING_NONE); if (!temp_mt) goto err; GLubyte *dst = intel_miptree_map_raw(intel, temp_mt); if (!dst) goto err; if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat, texImage->TexFormat, temp_mt->region->pitch, &dst, width, height, 1, format, type, pixels, packing)) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage"); } intel_miptree_unmap_raw(temp_mt); bool ret; ret = intel_miptree_blit(intel, temp_mt, 0, 0, 0, 0, false, intelImage->mt, texImage->Level, texImage->Face, xoffset, yoffset, false, width, height, COLOR_LOGICOP_COPY); assert(ret); intel_miptree_release(&temp_mt); _mesa_unmap_teximage_pbo(ctx, packing); return ret; err: _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage"); intel_miptree_release(&temp_mt); _mesa_unmap_teximage_pbo(ctx, packing); return false; }