static struct intel_buffer * intel_drm_buffer_from_handle(struct intel_drm_winsys *idws, const char* name, unsigned handle) { struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer); uint32_t tile = 0, swizzle = 0; if (!buf) return NULL; buf->magic = 0xDEAD1337; buf->bo = drm_intel_bo_gem_create_from_name(idws->pools.gem, name, handle); buf->flinked = TRUE; buf->flink = handle; if (!buf->bo) goto err; drm_intel_bo_get_tiling(buf->bo, &tile, &swizzle); if (tile != INTEL_TILE_NONE) buf->map_gtt = TRUE; return (struct intel_buffer *)buf; err: FREE(buf); return NULL; }
/* Create an intel_region for a buffer shared via a PRIME (dma-buf) fd.
 * On success the region owns one reference to the imported bo; on any
 * failure NULL is returned and the bo reference is dropped.
 */
struct intel_region *
intel_region_alloc_for_fd(struct intel_screen *screen,
                          GLuint cpp, GLuint width, GLuint height,
                          GLuint pitch, int fd, const char *name)
{
   struct intel_region *region;
   drm_intel_bo *bo;
   uint32_t swizzle, tile_mode;
   int err;

   bo = drm_intel_bo_gem_create_from_prime(screen->bufmgr, fd,
                                           height * pitch);
   if (bo == NULL)
      return NULL;

   /* The exporter decides the tiling; query it rather than assume. */
   err = drm_intel_bo_get_tiling(bo, &tile_mode, &swizzle);
   if (err != 0) {
      fprintf(stderr, "Couldn't get tiling of buffer (%s): %s\n",
              name, strerror(-err));
      goto fail;
   }

   region = intel_region_alloc_internal(screen, cpp, width, height,
                                        pitch, tile_mode, bo);
   if (region == NULL)
      goto fail;

   return region;

fail:
   drm_intel_bo_unreference(bo);
   return NULL;
}
/* Emit a warning if a tiled __DRIimage starts at an offset that is not
 * 4KiB-aligned; tiled surfaces are expected to begin on a tile boundary.
 */
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
   uint32_t tile_mode;
   uint32_t swizzle;

   drm_intel_bo_get_tiling(image->bo, &tile_mode, &swizzle);

   /* Linear images may sit at any offset; only tiled ones need the
    * 0xfff (4096-byte) alignment. */
   if (tile_mode == I915_TILING_NONE)
      return;

   if (image->offset & 0xfff) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
                    func, image->offset);
   }
}
/* Import a buffer shared from libva (identified by its GEM flink name)
 * and record its tiling mode on the CL image.  Returns the wrapped bo as
 * a cl_buffer, or NULL if the bo could not be opened.
 */
cl_buffer intel_share_image_from_libva(cl_context ctx,
                                       unsigned int bo_name,
                                       struct _cl_mem_image *image)
{
   drm_intel_bo *intel_bo;
   uint32_t intel_tiling, intel_swizzle_mode;

   intel_bo = intel_driver_share_buffer((intel_driver_t *)ctx->drv,
                                        "shared from libva", bo_name);
   /* Fix: a stale/invalid bo name makes the share fail; the original
    * code passed the NULL bo straight into drm_intel_bo_get_tiling and
    * crashed.  Bail out instead. */
   if (intel_bo == NULL)
      return NULL;

   drm_intel_bo_get_tiling(intel_bo, &intel_tiling, &intel_swizzle_mode);
   image->tiling = get_cl_tiling(intel_tiling);

   return (cl_buffer)intel_bo;
}
/* Allocate a new intel_region backed by a freshly allocated bo.
 * `tiling` is an I915_TILING_* request; the actual tiling granted by the
 * kernel is read back into the region.  Returns NULL on allocation
 * failure.
 *
 * Fixes vs. the previous revision:
 *  - `&region->tiling` / `&region->bit_6_swizzle` had been mangled into
 *    `®ion->...` by an encoding corruption (U+00AE), which does not
 *    compile; restored.
 *  - a duplicated `height = ALIGN(height, 2)` (plus a stale copy of its
 *    comment) was a no-op after the tiling-specific alignment; removed.
 *  - NULL results from the bo and region allocators are now checked
 *    before use, matching intel_region_alloc_for_fd's error handling.
 */
struct intel_region *
intel_region_alloc(struct intel_context *intel,
                   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height, GLuint pitch,
                   GLboolean expect_accelerated_upload)
{
   dri_bo *buffer;
   struct intel_region *region;

   /* If we're tiled, our allocations are in 8 or 32-row blocks, so
    * failure to align our height means that we won't allocate enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high because the
    * data port accesses 2x2 blocks even if the bottom row isn't to be
    * rendered, so failure to align means we could walk off the end of the
    * GTT and fault.
    */
   if (tiling == I915_TILING_X)
      height = ALIGN(height, 8);
   else if (tiling == I915_TILING_Y)
      height = ALIGN(height, 32);
   else
      height = ALIGN(height, 2);

   if (expect_accelerated_upload) {
      buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
                                             pitch * cpp * height, 64);
   } else {
      buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
                                  pitch * cpp * height, 64);
   }
   if (buffer == NULL)
      return NULL;

   region = intel_region_alloc_internal(intel, cpp, width, height,
                                        pitch, buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   if (tiling != I915_TILING_NONE) {
      /* Tiled pitches must be a multiple of 128 bytes for the blitter. */
      assert(((pitch * cpp) & 127) == 0);
      drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
      drm_intel_bo_get_tiling(buffer, &region->tiling,
                              &region->bit_6_swizzle);
   }

   return region;
}
struct intel_bo * intel_winsys_import_handle(struct intel_winsys *winsys, const char *name, const struct winsys_handle *handle, unsigned long height, enum intel_tiling_mode *tiling, unsigned long *pitch) { uint32_t real_tiling, swizzle; drm_intel_bo *bo; int err; switch (handle->type) { case DRM_API_HANDLE_TYPE_SHARED: { const uint32_t gem_name = handle->handle; bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr, name, gem_name); } break; case DRM_API_HANDLE_TYPE_FD: { const int fd = (int) handle->handle; bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr, fd, height * handle->stride); } break; default: bo = NULL; break; } if (!bo) return NULL; err = drm_intel_bo_get_tiling(bo, &real_tiling, &swizzle); if (err) { drm_intel_bo_unreference(bo); return NULL; } *tiling = real_tiling; *pitch = handle->stride; return (struct intel_bo *) bo; }
/* Probe whether the kernel applies bit-6 address swizzling by allocating
 * a throwaway X-tiled bo and reading back its swizzle mode.  Returns
 * false if the probe allocation fails.
 */
static bool
test_address_swizzling(struct intel_winsys *winsys)
{
   unsigned long probe_pitch;
   uint32_t probe_tiling = I915_TILING_X;
   uint32_t swizzle = I915_BIT_6_SWIZZLE_NONE;
   drm_intel_bo *probe;

   probe = drm_intel_bo_alloc_tiled(winsys->bufmgr,
                                    "address swizzling test",
                                    64, 64, 4,
                                    &probe_tiling, &probe_pitch, 0);
   if (probe) {
      drm_intel_bo_get_tiling(probe, &probe_tiling, &swizzle);
      drm_intel_bo_unreference(probe);
   }

   return swizzle != I915_BIT_6_SWIZZLE_NONE;
}
/* Detect bit-6 swizzling by allocating a small X-tiled scratch bo and
 * querying the swizzle mode the kernel assigned to it.  Returns false if
 * the scratch allocation fails or no swizzling is in effect.
 */
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
   uint32_t req_tiling = I915_TILING_X;
   uint32_t swizzle = 0;
   unsigned long scratch_pitch;
   drm_intel_bo *scratch;

   scratch = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
                                      64, 64, 4,
                                      &req_tiling, &scratch_pitch, 0);
   if (scratch == NULL)
      return false;

   drm_intel_bo_get_tiling(scratch, &req_tiling, &swizzle);
   drm_intel_bo_unreference(scratch);

   return swizzle != I915_BIT_6_SWIZZLE_NONE;
}
static struct i915_winsys_buffer * i915_drm_buffer_from_handle(struct i915_winsys *iws, struct winsys_handle *whandle, enum i915_winsys_buffer_tile *tiling, unsigned *stride) { struct i915_drm_winsys *idws = i915_drm_winsys(iws); struct i915_drm_buffer *buf; uint32_t tile = 0, swizzle = 0; if (whandle->type != DRM_API_HANDLE_TYPE_SHARED) return NULL; buf = CALLOC_STRUCT(i915_drm_buffer); if (!buf) return NULL; buf->magic = 0xDEAD1337; buf->bo = drm_intel_bo_gem_create_from_name(idws->gem_manager, "gallium3d_from_handle", whandle->handle); buf->flinked = TRUE; buf->flink = whandle->handle; if (!buf->bo) goto err; drm_intel_bo_get_tiling(buf->bo, &tile, &swizzle); *stride = whandle->stride; *tiling = tile; return (struct i915_winsys_buffer *)buf; err: FREE(buf); return NULL; }
/* Copy a width x height rectangle from src_bo to dst_bo with the blitter
 * (XY_SRC_COPY_BLT), then flush the batch.  Coordinates are in pixels,
 * pitches in bytes, bpp one of 8/16/32.  aborts() on any other bpp.
 *
 * NOTE(review): dword order and count in the OUT_BATCH sequence match the
 * XY_SRC_COPY_BLT layout exactly; do not reorder.
 */
void intel_blt_copy(struct intel_batchbuffer *batch,
		    drm_intel_bo *src_bo, int src_x1, int src_y1, int src_pitch,
		    drm_intel_bo *dst_bo, int dst_x1, int dst_y1, int dst_pitch,
		    int width, int height, int bpp)
{
	uint32_t src_tiling, dst_tiling, swizzle;
	uint32_t cmd_bits = 0;
	uint32_t br13_bits;

	drm_intel_bo_get_tiling(src_bo, &src_tiling, &swizzle);
	drm_intel_bo_get_tiling(dst_bo, &dst_tiling, &swizzle);

	/* On gen4+, tiled surfaces give the blitter their pitch in dwords
	 * rather than bytes, and the TILED bit must be set in the command. */
	if (IS_965(batch->devid) && src_tiling != I915_TILING_NONE) {
		src_pitch /= 4;
		cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
	}

	if (IS_965(batch->devid) && dst_tiling != I915_TILING_NONE) {
		dst_pitch /= 4;
		cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
	}

	/* BR13 bits 25:24 select the color depth. */
	br13_bits = 0;
	switch (bpp) {
	case 8:
		break;
	case 16:		/* supporting only RGB565, not ARGB1555 */
		br13_bits |= 1 << 24;
		break;
	case 32:
		br13_bits |= 3 << 24;
		cmd_bits |= XY_SRC_COPY_BLT_WRITE_ALPHA |
			    XY_SRC_COPY_BLT_WRITE_RGB;
		break;
	default:
		abort();
	}

	/* All coordinate/pitch fields in the command are signed 16-bit;
	 * make sure every value (including the far edges) fits. */
#define CHECK_RANGE(x)	((x) >= 0 && (x) < (1 << 15))
	assert(CHECK_RANGE(src_x1) && CHECK_RANGE(src_y1) &&
	       CHECK_RANGE(dst_x1) && CHECK_RANGE(dst_y1) &&
	       CHECK_RANGE(width) && CHECK_RANGE(height) &&
	       CHECK_RANGE(src_x1 + width) && CHECK_RANGE(src_y1 + height) &&
	       CHECK_RANGE(dst_x1 + width) && CHECK_RANGE(dst_y1 + height) &&
	       CHECK_RANGE(src_pitch) && CHECK_RANGE(dst_pitch));
#undef CHECK_RANGE

	BEGIN_BATCH(8);
	OUT_BATCH(XY_SRC_COPY_BLT_CMD | cmd_bits);
	OUT_BATCH((br13_bits) |
		  (0xcc << 16) | /* copy ROP */
		  dst_pitch);
	OUT_BATCH((dst_y1 << 16) | dst_x1); /* dst x1,y1 */
	OUT_BATCH(((dst_y1 + height) << 16) | (dst_x1 + width)); /* dst x2,y2 */
	OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH((src_y1 << 16) | src_x1); /* src x1,y1 */
	OUT_BATCH(src_pitch);
	OUT_RELOC(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}