/**
 * Request a tiling mode for \a bo.
 *
 * Validates that \a pitch meets the hardware pitch-alignment requirement
 * for the requested tiling, then asks the kernel to apply it.  Returns 0
 * on success, -1 if the pitch is invalid or the kernel did not honor the
 * exact tiling mode requested.
 */
int
intel_bo_set_tiling(struct intel_bo *bo, enum intel_tiling_mode tiling,
                    unsigned long pitch)
{
   uint32_t actual = tiling;
   int ret;

   /* hardware pitch constraints: X-tiled pitches are multiples of 512
    * bytes, Y-tiled pitches multiples of 128 bytes */
   switch (tiling) {
   case INTEL_TILING_X:
      if (pitch % 512)
         return -1;
      break;
   case INTEL_TILING_Y:
      if (pitch % 128)
         return -1;
      break;
   default:
      break;
   }

   ret = drm_intel_bo_set_tiling(gem_bo(bo), &actual, pitch);

   /* the kernel writes the tiling it actually applied back into
    * \a actual; treat any deviation from the request as failure */
   if (ret || actual != tiling) {
      assert(!"tiling mismatch");
      return -1;
   }

   return 0;
}
/**
 * Apply the cl_image tiling mode to a buffer object.
 *
 * Translates the CL tiling enum to the kernel's tiling value and asks the
 * kernel to apply it.  Returns 0 on success, -1 on failure (unknown tiling
 * mode, kernel error, or the kernel silently substituting a different
 * tiling mode).
 */
static int intel_buffer_set_tiling(cl_buffer bo,
                                   cl_image_tiling_t tiling, size_t stride)
{
  uint32_t intel_tiling, required_tiling;
  int ret;

  if (UNLIKELY((get_intel_tiling(tiling, &intel_tiling)) < 0))
    return -1;

  required_tiling = intel_tiling;
  ret = drm_intel_bo_set_tiling((drm_intel_bo*)bo, &intel_tiling, stride);
  /* The kernel may write back a different tiling mode than requested.
   * The assert() disappears under NDEBUG, so also report the mismatch
   * through the return value instead of returning success with the
   * wrong tiling silently in release builds. */
  assert(intel_tiling == required_tiling);
  if (ret == 0 && intel_tiling != required_tiling)
    return -1;
  return ret;
}
struct intel_bo * intel_winsys_alloc_bo(struct intel_winsys *winsys, const char *name, enum intel_tiling_mode tiling, unsigned long pitch, unsigned long height, bool cpu_init) { const unsigned int alignment = 4096; /* always page-aligned */ unsigned long size; drm_intel_bo *bo; switch (tiling) { case INTEL_TILING_X: if (pitch % 512) return NULL; break; case INTEL_TILING_Y: if (pitch % 128) return NULL; break; default: break; } if (pitch > ULONG_MAX / height) return NULL; size = pitch * height; if (cpu_init) { bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment); } else { bo = drm_intel_bo_alloc_for_render(winsys->bufmgr, name, size, alignment); } if (bo && tiling != INTEL_TILING_NONE) { uint32_t real_tiling = tiling; int err; err = drm_intel_bo_set_tiling(bo, &real_tiling, pitch); if (err || real_tiling != tiling) { assert(!"tiling mismatch"); drm_intel_bo_unreference(bo); return NULL; } } return (struct intel_bo *) bo; }
/**
 * Allocate the scanout (front) buffer.
 *
 * Prefers X-tiling when the driver's framebuffer-tiling option is set,
 * falling back to linear when the computed stride would exceed the
 * display engine's limit.  On success, *out_stride and *out_tiling are
 * set to the stride and the tiling the kernel actually applied.
 *
 * Returns the new bo, or NULL on failure.
 */
drm_intel_bo *intel_allocate_framebuffer(ScrnInfoPtr scrn,
					 int width, int height, int cpp,
					 int *out_stride, uint32_t *out_tiling)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	uint32_t tiling;
	int stride, size;
	drm_intel_bo *bo;

	intel_set_gem_max_sizes(scrn);

	if (intel->tiling & INTEL_TILING_FB)
		tiling = I915_TILING_X;
	else
		tiling = I915_TILING_NONE;

retry:
	size = intel_compute_size(intel,
				  width, height,
				  intel->cpp*8, 0,
				  &tiling, &stride);
	if (!intel_check_display_stride(scrn, stride, tiling)) {
		/* retry once untiled before giving up */
		if (tiling != I915_TILING_NONE) {
			tiling = I915_TILING_NONE;
			goto retry;
		}
		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
			   "Front buffer stride %d kB "
			   "exceeds display limit\n", stride / 1024);
		return NULL;
	}

	bo = drm_intel_bo_alloc(intel->bufmgr, "front buffer", size, 0);
	if (bo == NULL)
		return NULL; /* was "return FALSE" — integer constant in a
			      * pointer-returning function */

	/* the kernel writes the tiling it actually applied back into
	 * `tiling`, which is what we report via *out_tiling below */
	if (tiling != I915_TILING_NONE)
		drm_intel_bo_set_tiling(bo, &tiling, stride);

	xf86DrvMsg(scrn->scrnIndex, X_INFO,
		   "Allocated new frame buffer %dx%d stride %d, %s\n",
		   width, height, stride,
		   tiling == I915_TILING_NONE ? "untiled" : "tiled");

	/* scanout buffers must not go back into the bo cache */
	drm_intel_bo_disable_reuse(bo);

	*out_stride = stride;
	*out_tiling = tiling;
	return bo;
}
/**
 * Allocate a new intel_region backed by a freshly allocated bo.
 *
 * \param cpp     bytes per pixel
 * \param pitch   row pitch in pixels (byte pitch is pitch * cpp)
 * \param expect_accelerated_upload  hint that the GPU will write the
 *        buffer first; selects the render-optimized allocator
 *
 * Returns NULL when the bo or region allocation fails.
 */
struct intel_region *
intel_region_alloc(struct intel_context *intel,
                   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height, GLuint pitch,
                   GLboolean expect_accelerated_upload)
{
   dri_bo *buffer;
   struct intel_region *region;

   /* If we're tiled, our allocations are in 8 or 32-row blocks, so
    * failure to align our height means that we won't allocate enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high because the
    * data port accesses 2x2 blocks even if the bottom row isn't to be
    * rendered, so failure to align means we could walk off the end of the
    * GTT and fault.
    */
   if (tiling == I915_TILING_X)
      height = ALIGN(height, 8);
   else if (tiling == I915_TILING_Y)
      height = ALIGN(height, 32);
   else
      height = ALIGN(height, 2);

   /* NOTE(review): a second, redundant ALIGN(height, 2) was removed —
    * every branch above already produces an even height. */

   if (expect_accelerated_upload) {
      buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
                                             pitch * cpp * height, 64);
   } else {
      buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
                                  pitch * cpp * height, 64);
   }
   if (buffer == NULL)
      return NULL; /* don't hand a NULL bo to set_tiling below */

   region = intel_region_alloc_internal(intel, cpp, width, height, pitch,
                                        buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   if (tiling != I915_TILING_NONE) {
      /* tiled pitches must be a multiple of 128 bytes */
      assert(((pitch * cpp) & 127) == 0);
      drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
      drm_intel_bo_get_tiling(buffer, &region->tiling,
                              &region->bit_6_swizzle);
   }

   return region;
}
static drm_intel_bo * create_bo(int fd, uint32_t start_val) { drm_intel_bo *bo; uint32_t tiling = I915_TILING_X; int ret, i; bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096); ret = drm_intel_bo_set_tiling(bo, &tiling, width * 4); igt_assert(ret == 0); igt_assert(tiling == I915_TILING_X); /* Fill the BO with dwords starting at start_val */ for (i = 0; i < 1024 * 1024 / 4; i++) linear[i] = start_val++; gem_write(fd, bo->handle, 0, linear, sizeof(linear)); return bo; }