/**
 * \brief A fast path for glReadPixels
 *
 * This fast path is taken when the source format is BGRA, RGBA,
 * A or L and when the texture memory is X- or Y-tiled. It downloads
 * the source data by directly mapping the memory without a GTT fence.
 * This then needs to be de-tiled on the CPU before presenting the data to
 * the user in the linear fashion.
 *
 * This is a performance win over the conventional texture download path.
 * In the conventional texture download path, the texture is either mapped
 * through the GTT or copied to a linear buffer with the blitter before
 * handing off to a software path. This allows us to avoid round-tripping
 * through the GPU (in the case where we would be blitting) and do only a
 * single copy operation.
 *
 * \return true if the readback was performed here; false to make the
 *         caller fall back to the regular glReadPixels path.
 */
static bool
intel_readpixels_tiled_memcpy(struct gl_context * ctx,
                              GLint xoffset, GLint yoffset,
                              GLsizei width, GLsizei height,
                              GLenum format, GLenum type,
                              GLvoid * pixels,
                              const struct gl_pixelstore_attrib *pack)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* This path supports reading from color buffers only */
   if (rb == NULL)
      return false;

   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int dst_pitch;

   /* The miptree's buffer. */
   struct brw_bo *bo;

   uint32_t cpp;
   mem_copy_fn mem_copy = NULL;

   /* This fastpath is restricted to specific renderbuffer types:
    * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
    * more types.
    */
   if (!devinfo->has_llc ||
       !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
       pixels == NULL ||
       _mesa_is_bufferobj(pack->BufferObj) ||
       pack->Alignment > 4 ||
       pack->SkipPixels > 0 ||
       pack->SkipRows > 0 ||
       (pack->RowLength != 0 && pack->RowLength != width) ||
       pack->SwapBytes ||
       pack->LsbFirst ||
       pack->Invert)
      return false;

   /* Only a simple blit, no scale, bias or other mapping. */
   if (ctx->_ImageTransferState)
      return false;

   /* It is possible that the renderbuffer (or underlying texture) is
    * multisampled. Since ReadPixels from a multisampled buffer requires a
    * multisample resolve, we can't handle this here
    */
   if (rb->NumSamples > 1)
      return false;

   /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
    * function doesn't set the last channel to 1. Note this checks BaseFormat
    * rather than TexFormat in case the RGBX format is being simulated with an
    * RGBA format.
    */
   if (rb->_BaseFormat == GL_RGB)
      return false;

   if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp))
      return false;

   if (!irb->mt ||
       (irb->mt->surf.tiling != ISL_TILING_X &&
        irb->mt->surf.tiling != ISL_TILING_Y0)) {
      /* The algorithm is written only for X- or Y-tiled memory. */
      return false;
   }

   /* tiled_to_linear() assumes that if the object is swizzled, it is using
    * I915_BIT6_SWIZZLE_9_10 for X and I915_BIT6_SWIZZLE_9 for Y. This is only
    * true on gen5 and above.
    *
    * The killer on top is that some gen4 have an L-shaped swizzle mode, where
    * parts of the memory aren't swizzled at all. Userspace just can't handle
    * that.
    */
   if (devinfo->gen < 5 && brw->has_swizzling)
      return false;

   /* Since we are going to read raw data to the miptree, we need to resolve
    * any pending fast color clears before we start.
    */
   intel_miptree_access_raw(brw, irb->mt, irb->mt_level, irb->mt_layer, false);

   bo = irb->mt->bo;

   /* If the batch still references this bo, flush first so the CPU map
    * below observes completed rendering.
    */
   if (brw_batch_references(&brw->batch, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

   /* MAP_RAW: map the tiled contents as-is; tiled_to_linear() below does
    * the de-tiling on the CPU.
    */
   void *map = brw_bo_map(brw, bo, MAP_READ | MAP_RAW);
   if (map == NULL) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

   /* Translate the caller's coordinates into miptree coordinates for the
    * selected level/layer.
    */
   unsigned slice_offset_x, slice_offset_y;
   intel_miptree_get_image_offset(irb->mt, irb->mt_level, irb->mt_layer,
                                  &slice_offset_x, &slice_offset_y);
   xoffset += slice_offset_x;
   yoffset += slice_offset_y;

   dst_pitch = _mesa_image_row_stride(pack, width, format, type);

   /* For a window-system renderbuffer, the buffer is actually flipped
    * vertically, so we need to handle that. Since the detiling function
    * can only really work in the forwards direction, we have to be a
    * little creative. First, we compute the Y-offset of the first row of
    * the renderbuffer (in renderbuffer coordinates). We then match that
    * with the last row of the client's data. Finally, we give
    * tiled_to_linear a negative pitch so that it walks through the
    * client's data backwards as it walks through the renderbuffer forwards.
    */
   if (rb->Name == 0) {
      yoffset = rb->Height - yoffset - height;
      pixels += (ptrdiff_t) (height - 1) * dst_pitch;
      dst_pitch = -dst_pitch;
   }

   /* We postponed printing this message until having committed to executing
    * the function.
    */
   DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
       "mesa_format=0x%x tiling=%d "
       "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
       __func__, xoffset, yoffset, width, height,
       format, type, rb->Format, irb->mt->surf.tiling,
       pack->Alignment, pack->RowLength, pack->SkipPixels,
       pack->SkipRows);

   /* The destination pointer is rewound by (yoffset, xoffset) because
    * tiled_to_linear() indexes the destination with the absolute source
    * coordinates.
    */
   tiled_to_linear(
      xoffset * cpp, (xoffset + width) * cpp,
      yoffset, yoffset + height,
      pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
      map + irb->mt->offset,
      dst_pitch, irb->mt->surf.row_pitch,
      brw->has_swizzling,
      irb->mt->surf.tiling,
      mem_copy
   );

   brw_bo_unmap(bo);
   return true;
}
/**
 * \brief A fast path for glGetTexImage.
 *
 * Downloads a 2D texture's contents by mapping the tiled buffer directly
 * and de-tiling it on the CPU into the client's linear buffer.
 *
 * \return true if the download was performed here; false to make the
 *         caller fall back to the regular glGetTexImage path.
 *
 * \see intel_readpixels_tiled_memcpy()
 */
bool
intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
                                  struct gl_texture_image *texImage,
                                  GLint xoffset, GLint yoffset,
                                  GLsizei width, GLsizei height,
                                  GLenum format, GLenum type,
                                  GLvoid *pixels,
                                  const struct gl_pixelstore_attrib *packing)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_image *image = intel_texture_image(texImage);
   int dst_pitch;

   /* The miptree's buffer. */
   drm_intel_bo *bo;

   int error = 0;

   uint32_t cpp;
   mem_copy_fn mem_copy = NULL;

   /* This fastpath is restricted to specific texture types:
    * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
    * more types.
    *
    * FINISHME: The restrictions below on packing alignment and packing row
    * length are likely unneeded now because we calculate the destination stride
    * with _mesa_image_row_stride. However, before removing the restrictions
    * we need tests.
    */
   if (!brw->has_llc ||
       !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
       !(texImage->TexObject->Target == GL_TEXTURE_2D ||
         texImage->TexObject->Target == GL_TEXTURE_RECTANGLE) ||
       pixels == NULL ||
       _mesa_is_bufferobj(packing->BufferObj) ||
       packing->Alignment > 4 ||
       packing->SkipPixels > 0 ||
       packing->SkipRows > 0 ||
       (packing->RowLength != 0 && packing->RowLength != width) ||
       packing->SwapBytes ||
       packing->LsbFirst ||
       packing->Invert)
      return false;

   /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
    * function doesn't set the last channel to 1.
    */
   if (texImage->TexFormat == MESA_FORMAT_B8G8R8X8_UNORM ||
       texImage->TexFormat == MESA_FORMAT_R8G8B8X8_UNORM)
      return false;

   if (!intel_get_memcpy(texImage->TexFormat, format, type, &mem_copy, &cpp,
                         INTEL_DOWNLOAD))
      return false;

   /* If this is a nontrivial texture view, let another path handle it instead. */
   if (texImage->TexObject->MinLayer)
      return false;

   if (!image->mt ||
       (image->mt->tiling != I915_TILING_X &&
        image->mt->tiling != I915_TILING_Y)) {
      /* The algorithm is written only for X- or Y-tiled memory. */
      return false;
   }

   /* Since we are going to read raw data from the miptree, we need to resolve
    * any pending fast color clears before we start.
    */
   intel_miptree_resolve_color(brw, image->mt);

   bo = image->mt->bo;

   /* If the batch still references this bo, flush first so the CPU map
    * below observes completed rendering.
    */
   if (drm_intel_bo_references(brw->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

   error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
   if (error) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

   dst_pitch = _mesa_image_row_stride(packing, width, format, type);

   DBG("%s: level=%d x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
       "mesa_format=0x%x tiling=%d "
       "packing=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
       __func__, texImage->Level, xoffset, yoffset, width, height,
       format, type, texImage->TexFormat, image->mt->tiling,
       packing->Alignment, packing->RowLength, packing->SkipPixels,
       packing->SkipRows);

   int level = texImage->Level + texImage->TexObject->MinLevel;

   /* Adjust x and y offset based on miplevel */
   xoffset += image->mt->level[level].level_x;
   yoffset += image->mt->level[level].level_y;

   /* The destination pointer is rewound by (yoffset, xoffset) because
    * tiled_to_linear() indexes the destination with the absolute source
    * coordinates.
    */
   tiled_to_linear(
      xoffset * cpp, (xoffset + width) * cpp,
      yoffset, yoffset + height,
      pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
      bo->virtual,
      dst_pitch, image->mt->pitch,
      brw->has_swizzling,
      image->mt->tiling,
      mem_copy
   );

   drm_intel_bo_unmap(bo);
   return true;
}
/**
 * \brief A fast path for glReadPixels
 *
 * This fast path is taken when the source format is BGRA, RGBA,
 * A or L and when the texture memory is X- or Y-tiled. It downloads
 * the source data by directly mapping the memory without a GTT fence.
 * This then needs to be de-tiled on the CPU before presenting the data to
 * the user in the linear fashion.
 *
 * This is a performance win over the conventional texture download path.
 * In the conventional texture download path, the texture is either mapped
 * through the GTT or copied to a linear buffer with the blitter before
 * handing off to a software path. This allows us to avoid round-tripping
 * through the GPU (in the case where we would be blitting) and do only a
 * single copy operation.
 *
 * \return true if the readback was performed here; false to make the
 *         caller fall back to the regular glReadPixels path.
 *
 * NOTE(review): this file contains a second static definition of this same
 * function earlier in the source — this looks like two driver revisions
 * concatenated; confirm which one belongs in this translation unit.
 */
static bool
intel_readpixels_tiled_memcpy(struct gl_context * ctx,
                              GLint xoffset, GLint yoffset,
                              GLsizei width, GLsizei height,
                              GLenum format, GLenum type,
                              GLvoid * pixels,
                              const struct gl_pixelstore_attrib *pack)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;

   /* This path supports reading from color buffers only */
   if (rb == NULL)
      return false;

   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int dst_pitch;

   /* The miptree's buffer. */
   drm_intel_bo *bo;

   int error = 0;

   uint32_t cpp;
   mem_copy_fn mem_copy = NULL;

   /* This fastpath is restricted to specific renderbuffer types:
    * a 2D BGRA, RGBA, L8 or A8 texture. It could be generalized to support
    * more types.
    */
   if (!brw->has_llc ||
       !(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) ||
       pixels == NULL ||
       _mesa_is_bufferobj(pack->BufferObj) ||
       pack->Alignment > 4 ||
       pack->SkipPixels > 0 ||
       pack->SkipRows > 0 ||
       (pack->RowLength != 0 && pack->RowLength != width) ||
       pack->SwapBytes ||
       pack->LsbFirst ||
       pack->Invert)
      return false;

   /* Only a simple blit, no scale, bias or other mapping. */
   if (ctx->_ImageTransferState)
      return false;

   /* This renderbuffer can come from a texture. In this case, we impose
    * some of the same restrictions we have for textures and adjust for
    * miplevels.
    */
   if (rb->TexImage) {
      if (rb->TexImage->TexObject->Target != GL_TEXTURE_2D &&
          rb->TexImage->TexObject->Target != GL_TEXTURE_RECTANGLE)
         return false;

      int level = rb->TexImage->Level + rb->TexImage->TexObject->MinLevel;

      /* Adjust x and y offset based on miplevel */
      xoffset += irb->mt->level[level].level_x;
      yoffset += irb->mt->level[level].level_y;
   }

   /* It is possible that the renderbuffer (or underlying texture) is
    * multisampled. Since ReadPixels from a multisampled buffer requires a
    * multisample resolve, we can't handle this here
    */
   if (rb->NumSamples > 1)
      return false;

   /* We can't handle copying from RGBX or BGRX because the tiled_memcpy
    * function doesn't set the last channel to 1. Note this checks BaseFormat
    * rather than TexFormat in case the RGBX format is being simulated with an
    * RGBA format.
    */
   if (rb->_BaseFormat == GL_RGB)
      return false;

   if (!intel_get_memcpy(rb->Format, format, type, &mem_copy, &cpp,
                         INTEL_DOWNLOAD))
      return false;

   if (!irb->mt ||
       (irb->mt->tiling != I915_TILING_X &&
        irb->mt->tiling != I915_TILING_Y)) {
      /* The algorithm is written only for X- or Y-tiled memory. */
      return false;
   }

   /* Since we are going to read raw data from the miptree, we need to resolve
    * any pending fast color clears before we start.
    */
   intel_miptree_resolve_color(brw, irb->mt);

   bo = irb->mt->bo;

   /* If the batch still references this bo, flush first so the CPU map
    * below observes completed rendering.
    */
   if (drm_intel_bo_references(brw->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

   error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
   if (error) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

   dst_pitch = _mesa_image_row_stride(pack, width, format, type);

   /* For a window-system renderbuffer, the buffer is actually flipped
    * vertically, so we need to handle that. Since the detiling function
    * can only really work in the forwards direction, we have to be a
    * little creative. First, we compute the Y-offset of the first row of
    * the renderbuffer (in renderbuffer coordinates). We then match that
    * with the last row of the client's data. Finally, we give
    * tiled_to_linear a negative pitch so that it walks through the
    * client's data backwards as it walks through the renderbuffer forwards.
    */
   if (rb->Name == 0) {
      yoffset = rb->Height - yoffset - height;
      pixels += (ptrdiff_t) (height - 1) * dst_pitch;
      dst_pitch = -dst_pitch;
   }

   /* We postponed printing this message until having committed to executing
    * the function.
    */
   DBG("%s: x,y=(%d,%d) (w,h)=(%d,%d) format=0x%x type=0x%x "
       "mesa_format=0x%x tiling=%d "
       "pack=(alignment=%d row_length=%d skip_pixels=%d skip_rows=%d)\n",
       __func__, xoffset, yoffset, width, height,
       format, type, rb->Format, irb->mt->tiling,
       pack->Alignment, pack->RowLength, pack->SkipPixels,
       pack->SkipRows);

   /* The destination pointer is rewound by (yoffset, xoffset) because
    * tiled_to_linear() indexes the destination with the absolute source
    * coordinates.
    */
   tiled_to_linear(
      xoffset * cpp, (xoffset + width) * cpp,
      yoffset, yoffset + height,
      pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
      bo->virtual,
      dst_pitch, irb->mt->pitch,
      brw->has_swizzling,
      irb->mt->tiling,
      mem_copy
   );

   drm_intel_bo_unmap(bo);
   return true;
}