static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane,
                  void *loaderPrivate)
{
   int width, height, offset, stride, dri_format, index;
   struct intel_image_format *f;
   uint32_t mask_x, mask_y;
   __DRIimage *image;

   if (parent == NULL || parent->planar_format == NULL)
      return NULL;

   f = parent->planar_format;

   if (plane >= f->nplanes)
      return NULL;

   width = parent->region->width >> f->planes[plane].width_shift;
   height = parent->region->height >> f->planes[plane].height_shift;
   dri_format = f->planes[plane].dri_format;
   index = f->planes[plane].buffer_index;
   offset = parent->offsets[index];
   stride = parent->strides[index];

   image = intel_allocate_image(dri_format, loaderPrivate);
   if (image == NULL)
      return NULL;

   if (offset + height * stride > parent->region->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      free(image);
      return NULL;
   }

   image->region = calloc(sizeof(*image->region), 1);
   if (image->region == NULL) {
      free(image);
      return NULL;
   }

   image->region->cpp = _mesa_get_format_bytes(image->format);
   image->region->width = width;
   image->region->height = height;
   image->region->pitch = stride;
   image->region->refcount = 1;
   image->region->bo = parent->region->bo;
   drm_intel_bo_reference(image->region->bo);
   image->region->tiling = parent->region->tiling;
   image->offset = offset;
   intel_setup_image_from_dimensions(image);

   intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
   if (offset & mask_x)
      _mesa_warning(NULL,
                    "intel_create_sub_image: offset not on tile boundary");

   return image;
}
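/* The tile-boundary warning above depends on what
 * intel_region_get_tile_masks() reports for the region's tiling mode.  That
 * helper is not part of this excerpt; the sketch below shows what such a
 * helper plausibly computes, assuming the classic tile geometries (X tiles
 * are 512 bytes wide by 8 rows, Y tiles are 128 bytes wide by 32 rows).
 * The name and exact body are illustrative, not the driver's code.
 */
static void
sketch_region_get_tile_masks(uint32_t tiling, int cpp,
                             uint32_t *mask_x, uint32_t *mask_y)
{
   switch (tiling) {
   case I915_TILING_X:
      *mask_x = 512 / cpp - 1;   /* pixels per X-tile row, minus one */
      *mask_y = 7;               /* 8 rows per X tile */
      break;
   case I915_TILING_Y:
      *mask_x = 128 / cpp - 1;   /* pixels per Y-tile row, minus one */
      *mask_y = 31;              /* 32 rows per Y tile */
      break;
   default:                      /* I915_TILING_NONE: no tile boundaries */
      *mask_x = *mask_y = 0;
      break;
   }
}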
/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);

   *tile_x = irb->draw_x & mask_x;
   *tile_y = irb->draw_y & mask_y;
   return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
                                          irb->draw_y & ~mask_y, false);
}
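/* The base offset comes from intel_region_get_aligned_offset(), which is
 * also not shown in this excerpt.  Under the same assumed tile geometry as
 * the sketch above, a page-aligned byte offset for a tile-aligned (x, y)
 * could be computed roughly as below; the real helper may differ in detail
 * (for instance in whether the pitch is stored in pixels or bytes).
 */
static uint32_t
sketch_region_get_aligned_offset(uint32_t tiling, int cpp,
                                 uint32_t pitch_bytes,
                                 uint32_t x, uint32_t y)
{
   switch (tiling) {
   case I915_TILING_X:
      /* x is a multiple of 512/cpp pixels and y a multiple of 8 rows, so
       * (x, y) names the top-left corner of a whole 4 KB tile. */
      return y * pitch_bytes + x / (512 / cpp) * 4096;
   case I915_TILING_Y:
      /* x is a multiple of 128/cpp pixels and y a multiple of 32 rows. */
      return y * pitch_bytes + x / (128 / cpp) * 4096;
   default:                      /* I915_TILING_NONE: plain linear math */
      return y * pitch_bytes + x * cpp;
   }
}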
/**
 * Split x_offset and y_offset into a base offset (in bytes) and a remaining
 * x/y offset (in pixels).  Note: we can't do this by calling
 * intel_renderbuffer_tile_offsets(), because the offsets may have been
 * adjusted to account for Y vs. W tiling differences.  So we compute it
 * directly from the adjusted offsets.
 */
uint32_t
brw_blorp_surface_info::compute_tile_offsets(uint32_t *tile_x,
                                             uint32_t *tile_y) const
{
   struct intel_region *region = mt->region;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y,
                               map_stencil_as_y_tiled);

   *tile_x = x_offset & mask_x;
   *tile_y = y_offset & mask_y;
   return intel_region_get_aligned_offset(region, x_offset & ~mask_x,
                                          y_offset & ~mask_y,
                                          map_stencil_as_y_tiled);
}
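/* A concrete illustration of the split, with assumed numbers: for a
 * 4-byte-per-pixel Y-tiled surface the masks are 31 in both directions
 * (under the geometry assumed in the sketches above), so x_offset = 100 and
 * y_offset = 70 split into tile_x = 100 & 31 = 4, tile_y = 70 & 31 = 6,
 * with the byte offset computed from the tile-aligned coordinate (96, 64).
 * When map_stencil_as_y_tiled is set, the W-tiled stencil data is being
 * addressed through Y-tile arithmetic, which is why the masks are taken for
 * Y tiling rather than for the region's own tiling mode.
 */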
/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;
   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}
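/* Worked example with assumed numbers: for a 4-byte-per-pixel X-tiled
 * miptree (masks 127 and 7 under the geometry assumed above), a level/slice
 * whose image starts at x = 300, y = 132 yields tile_x = 300 & 127 = 44 and
 * tile_y = 132 & 7 = 4, and the returned byte offset is computed for the
 * tile-aligned coordinate (256, 128).  The caller adds that offset to the
 * surface base address and programs the remaining (44, 4) as the surface's
 * x/y offset fields.
 */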
static __DRIimage *
intel_create_sub_image(__DRIimage *parent,
                       int width, int height, int dri_format,
                       int offset, int pitch, void *loaderPrivate)
{
   __DRIimage *image;
   int cpp;
   uint32_t mask_x, mask_y;

   image = intel_allocate_image(dri_format, loaderPrivate);
   cpp = _mesa_get_format_bytes(image->format);
   if (offset + height * cpp * pitch > parent->region->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      FREE(image);
      return NULL;
   }

   image->region = calloc(sizeof(*image->region), 1);
   if (image->region == NULL) {
      FREE(image);
      return NULL;
   }

   image->region->cpp = _mesa_get_format_bytes(image->format);
   image->region->width = width;
   image->region->height = height;
   image->region->pitch = pitch;
   image->region->refcount = 1;
   image->region->bo = parent->region->bo;
   drm_intel_bo_reference(image->region->bo);
   image->region->tiling = parent->region->tiling;
   image->region->screen = parent->region->screen;
   image->offset = offset;

   intel_region_get_tile_masks(image->region, &mask_x, &mask_y);
   if (offset & mask_x)
      _mesa_warning(NULL,
                    "intel_create_sub_image: offset not on tile boundary");

   return image;
}
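/* Note that the bounds check here multiplies in cpp, so pitch is evidently
 * in pixels, whereas intel_from_planar() above checks offset + height *
 * stride with a stride that is already in bytes.  With assumed numbers: a
 * 256 x 256 sub-image with cpp = 4 and pitch = 256 starting at offset 65536
 * needs 65536 + 256 * 4 * 256 = 327680 bytes, and a parent BO smaller than
 * that trips the "subimage out of bounds" warning.
 */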
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *stencil_mt = NULL;
   struct intel_region *hiz_region = NULL;
   unsigned int len;
   bool separate_stencil = false;

   /* Amount by which drawing should be offset in order to draw to the
    * appropriate miplevel/zoffset/cubeface.  We will extract these values
    * from depth_irb or stencil_irb once we determine which is present.
    */
   uint32_t draw_x = 0, draw_y = 0;

   /* Masks used to determine how much of the draw_x and draw_y offsets should
    * be performed using the fine adjustment of "depth coordinate offset X/Y"
    * (dw5 of 3DSTATE_DEPTH_BUFFER).  Any remaining coarse adjustment will be
    * performed by changing the base addresses of the buffers.
    *
    * Since the HiZ, depth, and stencil buffers all use the same "depth
    * coordinate offset X/Y" values, we need to make sure that the coarse
    * adjustment will be possible to apply to all three buffers.  Since coarse
    * adjustment can only be applied in multiples of the tile size, we will OR
    * together the tile masks of all the buffers to determine which offsets to
    * perform as fine adjustments.
    */
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   if (depth_irb) {
      intel_region_get_tile_masks(depth_irb->mt->region,
                                  &tile_mask_x, &tile_mask_y);
   }

   if (depth_irb && depth_irb->mt && depth_irb->mt->hiz_mt) {
      hiz_region = depth_irb->mt->hiz_mt->region;

      uint32_t hiz_tile_mask_x, hiz_tile_mask_y;
      intel_region_get_tile_masks(hiz_region,
                                  &hiz_tile_mask_x, &hiz_tile_mask_y);

      /* Each HiZ row represents 2 rows of pixels */
      hiz_tile_mask_y = hiz_tile_mask_y << 1 | 1;

      tile_mask_x |= hiz_tile_mask_x;
      tile_mask_y |= hiz_tile_mask_y;
   }

   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (intel->gen == 6) {
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
   }

   /* Find the real separate stencil mt if present. */
   if (stencil_irb) {
      stencil_mt = stencil_irb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;

      if (stencil_mt->format == MESA_FORMAT_S8) {
         separate_stencil = true;

         /* Separate stencil buffer uses 64x64 tiles. */
         tile_mask_x |= 63;
         tile_mask_y |= 63;
      } else {
         uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
         intel_region_get_tile_masks(stencil_mt->region,
                                     &stencil_tile_mask_x,
                                     &stencil_tile_mask_y);

         tile_mask_x |= stencil_tile_mask_x;
         tile_mask_y |= stencil_tile_mask_y;
      }
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil)
      depth_irb = stencil_irb;

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (!depth_irb && !separate_stencil) {
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();

   } else if (!depth_irb && separate_stencil) {
      uint32_t tile_x, tile_y;

      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * Enable the hiz bit because it and the separate stencil bit must have
       * the same value.  From Section 2.11.5.6.1.1 3DSTATE_DEPTH_BUFFER, Bit
       * 1.21 "Separate Stencil Enable":
       *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
       *     Enable must also be enabled.
       *
       *     [DevGT]: This field must be set to the same value (enabled or
       *     disabled) as Hierarchical Depth Buffer Enable.
       *
       * The tiled bit must be set.  From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(intel->has_separate_stencil);

      draw_x = stencil_irb->draw_x;
      draw_y = stencil_irb->draw_y;
      tile_x = draw_x & tile_mask_x;
      tile_y = draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (1 << 21) | /* separate stencil enable */
                (1 << 22) | /* hiz enable */
                (BRW_TILEWALK_YMAJOR << 26) |
                (1 << 27) | /* tiled surface */
                (BRW_SURFACE_2D << 29));
      OUT_BATCH(0);
      OUT_BATCH(((stencil_irb->Base.Base.Width + tile_x - 1) << 6) |
                 (stencil_irb->Base.Base.Height + tile_y - 1) << 19);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
         assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();

   } else {
      struct intel_region *region = depth_irb->mt->region;
      uint32_t tile_x, tile_y, offset;

      /* If using separate stencil, hiz must be enabled. */
      assert(!separate_stencil || hiz_region);

      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);
      assert(!hiz_region || region->tiling == I915_TILING_Y);

      draw_x = depth_irb->draw_x;
      draw_y = depth_irb->draw_y;
      tile_x = draw_x & tile_mask_x;
      tile_y = draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0.  This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      offset = intel_region_get_aligned_offset(region,
                                               draw_x & ~tile_mask_x,
                                               draw_y & ~tile_mask_y);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (brw_depthbuffer_format(brw) << 18) |
                ((hiz_region ? 1 : 0) << 21) | /* separate stencil enable */
                ((hiz_region ? 1 : 0) << 22) | /* hiz enable */
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                (((depth_irb->Base.Base.Width + tile_x) - 1) << 6) |
                (((depth_irb->Base.Base.Height + tile_y) - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
         assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   if (hiz_region || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set.  Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER.  Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz_region) {
         uint32_t hiz_offset =
            intel_region_get_aligned_offset(hiz_region,
                                            draw_x & ~tile_mask_x,
                                            (draw_y & ~tile_mask_y) / 2);

         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
         OUT_RELOC(hiz_region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   hiz_offset);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
         struct intel_region *region = stencil_mt->region;

         /* Note: we can't compute the stencil offset using
          * intel_region_get_aligned_offset(), because stencil_region claims
          * that the region is untiled; in fact it's W tiled.
          */
         uint32_t stencil_offset =
            (draw_y & ~tile_mask_y) * region->pitch +
            (draw_x & ~tile_mask_x) * 64;

         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
          *    The pitch must be set to 2x the value computed based on width,
          *    as the stencil buffer is stored with two rows interleaved.
          */
         OUT_BATCH(2 * region->pitch * region->cpp - 1);
         OUT_RELOC(region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   stencil_offset);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety.  If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
    *     when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (intel->gen >= 6 || hiz_region) {
      if (intel->gen == 6)
         intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
                GEN5_DEPTH_CLEAR_VALID |
                (2 - 2));
      OUT_BATCH(depth_irb ? depth_irb->mt->depth_clear_value : 0);
      ADVANCE_BATCH();
   }
}
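/* Worked example with assumed numbers (not taken from the driver): suppose
 * the OR of all the tile masks ends up as tile_mask_x = tile_mask_y = 63,
 * e.g. because a separate 64x64-tiled stencil buffer is bound.  For
 * draw_x = 1000 and draw_y = 600:
 *
 *    fine adjustment (dw5):     1000 & 63  = 40     600 & 63  = 24
 *    coarse adjustment (base):  1000 & ~63 = 960    600 & ~63 = 576
 *
 * The coarse part is folded into the depth, HiZ, and stencil base addresses
 * (via intel_region_get_aligned_offset(), or the hand-rolled W-tile math for
 * stencil), while the fine part is programmed once as the shared "depth
 * coordinate offset X/Y".  Here 40 and 24 are already multiples of 8, so the
 * "smash the low 3 bits" workaround leaves them unchanged.
 */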