struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->gtt_space == NULL)
		return true;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (!IS_I965G(dev)) {
		if (obj_priv->gtt_offset & (obj->size - 1))
			return false;
		if (IS_I9XX(dev)) {
			if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
				return false;
		} else {
			if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
				return false;
		}
	}

	return true;
}
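The pre-965 checks above encode the rule that a fenced object must sit at a naturally aligned offset: the fence covers a power-of-two region starting at the object, so the bound offset must be a multiple of that size. A minimal sketch of that rule in isolation, using a hypothetical helper name and assuming the fenced size has already been rounded up to a power of two:

/* Hypothetical helper illustrating the pre-965 fence placement rule that
 * i915_gem_object_fence_offset_ok() tests: the bound offset must be a
 * multiple of the (power-of-two) fenced size. */
static bool example_fence_offset_is_natural(u32 gtt_offset, u32 fenced_size)
{
	/* fenced_size is assumed to already be a power of two */
	return (gtt_offset & (fenced_size - 1)) == 0;
}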
cairo_surface_t *
intel_surface_map_to_image (void *abstract_surface)
{
    intel_surface_t *surface = abstract_surface;

    if (surface->drm.fallback == NULL) {
	cairo_surface_t *image;
	cairo_status_t status;
	void *ptr;

	if (surface->drm.base.backend->flush != NULL) {
	    status = surface->drm.base.backend->flush (surface);
	    if (unlikely (status))
		return _cairo_surface_create_in_error (status);
	}

	ptr = intel_bo_map (to_intel_device (surface->drm.base.device),
			    to_intel_bo (surface->drm.bo));
	if (unlikely (ptr == NULL))
	    return _cairo_surface_create_in_error (CAIRO_STATUS_NO_MEMORY);

	image = cairo_image_surface_create_for_data (ptr,
						     surface->drm.format,
						     surface->drm.width,
						     surface->drm.height,
						     surface->drm.stride);
	if (unlikely (image->status))
	    return image;

	surface->drm.fallback = image;
    }

    return surface->drm.fallback;
}
void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL)
		return;

	for (i = 0; i < page_count; i++) {
		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj_priv->bit_17) != 0)) {
			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
			if (ret != 0) {
				DRM_ERROR("Failed to swizzle page\n");
				return;
			}
			set_page_dirty(obj_priv->pages[i]);
		}
	}
}
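i915_gem_swizzle_page() is not shown in this excerpt. A plausible sketch of what it does, swapping the two 64-byte halves of every 128-byte span so that data laid out for one value of physical address bit 17 reads back correctly with the other; treat this as an illustration rather than the exact upstream implementation:

/* Illustration only: swap the 64-byte halves of each 128-byte block of a
 * page so contents written for one value of physical address bit 17 become
 * correct for the other. */
static int example_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);
	if (vaddr == NULL)
		return -ENOMEM;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);

	return 0;
}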
void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL) {
		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
					   sizeof(long), GFP_KERNEL);
		if (obj_priv->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	for (i = 0; i < page_count; i++) {
		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
			__set_bit(i, obj_priv->bit_17);
		else
			__clear_bit(i, obj_priv->bit_17);
	}
}
cairo_status_t
intel_surface_finish (void *abstract_surface)
{
    intel_surface_t *surface = abstract_surface;

    intel_bo_in_flight_add (to_intel_device (surface->drm.base.device),
			    to_intel_bo (surface->drm.bo));

    return _cairo_drm_surface_finish (&surface->drm);
}
static cairo_status_t
intel_surface_enable_scan_out (void *abstract_surface)
{
    intel_surface_t *surface = abstract_surface;

    if (unlikely (surface->drm.bo == NULL))
	return _cairo_error (CAIRO_STATUS_INVALID_SIZE);

    to_intel_bo (surface->drm.bo)->tiling = I915_TILING_X;

    return CAIRO_STATUS_SUCCESS;
}
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size,
			      flags, NULL);
}
static int igt_dmabuf_import_ownership(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ptr = dma_buf_vmap(dmabuf);
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto err_dmabuf;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_vunmap(dmabuf, ptr);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto err_dmabuf;
	}

	dma_buf_put(dmabuf);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
		goto out_obj;
	}

	err = 0;
	i915_gem_object_unpin_pages(obj);
out_obj:
	i915_gem_object_put(obj);
	return err;

err_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
cairo_status_t
intel_surface_acquire_source_image (void *abstract_surface,
				    cairo_image_surface_t **image_out,
				    void **image_extra)
{
    intel_surface_t *surface = abstract_surface;
    cairo_surface_t *image;
    cairo_status_t status;
    void *ptr;

    if (surface->drm.fallback != NULL) {
	image = surface->drm.fallback;
	goto DONE;
    }

    image = _cairo_surface_has_snapshot (&surface->drm.base,
					 &_cairo_image_surface_backend);
    if (image != NULL)
	goto DONE;

    if (surface->drm.base.backend->flush != NULL) {
	status = surface->drm.base.backend->flush (surface);
	if (unlikely (status))
	    return status;
    }

    ptr = intel_bo_map (to_intel_device (surface->drm.base.device),
			to_intel_bo (surface->drm.bo));
    if (unlikely (ptr == NULL))
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    image = cairo_image_surface_create_for_data (ptr,
						 surface->drm.format,
						 surface->drm.width,
						 surface->drm.height,
						 surface->drm.stride);
    if (unlikely (image->status))
	return image->status;

    _cairo_surface_attach_snapshot (&surface->drm.base,
				    image,
				    surface_finish_and_destroy);

DONE:
    *image_out = (cairo_image_surface_t *) cairo_surface_reference (image);
    *image_extra = NULL;
    return CAIRO_STATUS_SUCCESS;
}
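The snapshot detach callback passed to _cairo_surface_attach_snapshot() above is not shown in this excerpt. A minimal sketch, assuming it only needs to finish and drop the image surface that wraps the mapped bo when the snapshot is detached:

/* Minimal sketch of the detach callback used above: finish the image that
 * wraps the mapped bo and release the reference the snapshot held. */
static void
surface_finish_and_destroy (cairo_surface_t *surface)
{
    cairo_surface_finish (surface);
    cairo_surface_destroy (surface);
}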
static int igt_dmabuf_import_self(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

	err = 0;
out_import:
	i915_gem_object_put(to_intel_bo(import));
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
	return err;
}
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj_priv->tiling_mode;
	switch (obj_priv->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
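From userspace this handler is reached through DRM_IOCTL_I915_GEM_GET_TILING. A hedged sketch of a libdrm caller, assuming an open device fd and a valid GEM handle supplied by the caller:

/* Userspace sketch (libdrm): query the tiling/swizzle reported by the
 * i915_gem_get_tiling() ioctl handler above. 'fd' and 'handle' are assumed
 * to come from the caller. */
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int query_tiling(int fd, unsigned int handle)
{
	struct drm_i915_gem_get_tiling get = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get))
		return -1;

	printf("tiling=%u swizzle=%u\n", get.tiling_mode, get.swizzle_mode);
	return 0;
}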
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}
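The export variants above (and the import path exercised by the selftests below) are reached through the driver's PRIME hooks. A context sketch of the relevant struct drm_driver fields from this era, shown only to place the functions; the structure name here is illustrative:

/* Context sketch: how a driver of this era would plug the PRIME export and
 * import entry points into its struct drm_driver. */
static struct drm_driver example_prime_hooks = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = i915_gem_prime_export,
	.gem_prime_import   = i915_gem_prime_import,
};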
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */

		obj->map_and_fenceable =
			!i915_gem_obj_ggtt_bound(obj) ||
			(i915_gem_obj_ggtt_offset(obj) +
			 obj->base.size <= dev_priv->gtt.mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_align =
				i915_gem_get_gtt_alignment(dev, obj->base.size,
							   args->tiling_mode,
							   false);
			if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
				ret = i915_gem_object_ggtt_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->last_fenced_seqno ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;

			/* Force the fence to be reacquired for GTT access */
			i915_gem_release_mmap(obj);
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (obj->bit_17 == NULL) {
			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
					      sizeof(long), GFP_KERNEL);
		}
	} else {
		/* Tiling no longer needs the bit-17 record; drop it. */
		kfree(obj->bit_17);
		obj->bit_17 = NULL;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int igt_dmabuf_import(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *obj_map, *dma_map;
	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
	int err, i;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_dmabuf;
	}

	if (obj->base.dev != &i915->drm) {
		pr_err("i915_gem_prime_import created a non-i915 object!\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (obj->base.size != PAGE_SIZE) {
		pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
		       (long long)obj->base.size, PAGE_SIZE);
		err = -EINVAL;
		goto out_obj;
	}

	dma_map = dma_buf_vmap(dmabuf);
	if (!dma_map) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out_obj;
	}

	if (0) { /* Can not yet map dmabuf */
		obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(obj_map)) {
			err = PTR_ERR(obj_map);
			pr_err("i915_gem_object_pin_map failed with err=%d\n",
			       err);
			goto out_dma_map;
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(dma_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("imported vmap not all set to %x!\n",
				       pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(obj_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("exported vmap not all set to %x!\n",
				       pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		i915_gem_object_unpin_map(obj);
	}

	err = 0;
out_dma_map:
	dma_buf_vunmap(dmabuf, dma_map);
out_obj:
	i915_gem_object_put(obj);
out_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
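These igt_dmabuf_* cases are written for the i915 mock selftest harness. A sketch of how such tests are typically registered; the entry-point name is illustrative, the SUBTEST()/i915_subtests() helpers are the ones the harness provides, and the exact teardown call for the mock device varies between kernel versions:

/* Illustrative registration of the mock dmabuf selftests above with the
 * i915 selftest harness; the function name here is an example. */
int example_dmabuf_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_dmabuf_import),
		SUBTEST(igt_dmabuf_import_ownership),
		SUBTEST(igt_dmabuf_import_self),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);	/* teardown helper differs by version */
	return err;
}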
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = to_intel_bo(obj);

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
			ret = i915_gem_object_unbind(obj);
		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
			ret = i915_gem_object_put_fence_reg(obj);
		else
			i915_gem_release_mmap(obj);

		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to reset object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <=
			 dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		obj->tiling_changed = true;
		obj->tiling_mode = args->tiling_mode;
		obj->stride = args->stride;
	}
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	DRM_LOCK(dev);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <=
			 dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
				i915_gem_get_unfenced_gtt_alignment(dev,
								    obj->base.size,
								    args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		if (ret == 0) {
			obj->fence_dirty =
				obj->fenced_gpu_access ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;

	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	return ret;
}
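For reference, the caller-facing side of this ioctl: a hedged userspace sketch that requests X tiling for a GEM handle and reads back what the kernel actually granted (fd, handle and stride are assumed inputs; the handler may downgrade to I915_TILING_NONE when it cannot handle the platform's swizzling):

/* Userspace sketch (libdrm): ask the kernel for X tiling and observe the
 * tiling/swizzle it applied. */
#include <xf86drm.h>
#include <i915_drm.h>

static int request_x_tiling(int fd, unsigned int handle, unsigned int stride)
{
	struct drm_i915_gem_set_tiling set = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
		return -1;

	/* set.tiling_mode and set.swizzle_mode now reflect what was applied */
	return set.tiling_mode == I915_TILING_X ? 0 : 1;
}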
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
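dma_buf_to_obj() is only valid for buffers this driver exported itself, where priv was set to the GEM object as in the export variants above. A hedged sketch of the self-import short-circuit an import path performs, which is the behaviour igt_dmabuf_import_self() verifies; the function name is illustrative:

/* Sketch of the self-import check done before attaching to a dma-buf: if it
 * was exported by this driver and device, take a reference on the existing
 * GEM object instead of wrapping it in a new one. */
static struct drm_gem_object *
example_prime_import_self(struct drm_device *dev, struct dma_buf *dma_buf)
{
	if (dma_buf->ops == &i915_dmabuf_ops) {
		struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	return NULL;	/* fall through to a normal dma-buf attach */
}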
static cairo_status_t
i965_surface_mask_internal (i965_surface_t *dst,
			    cairo_operator_t op,
			    const cairo_pattern_t *source,
			    i965_surface_t *mask,
			    cairo_clip_t *clip,
			    const cairo_composite_rectangles_t *extents)
{
    i965_device_t *device;
    i965_shader_t shader;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;

    i965_shader_init (&shader, dst, op);

    status = i965_shader_acquire_pattern (&shader, &shader.source,
					  source, &extents->bounded);
    if (unlikely (status))
	return status;

    shader.mask.type.vertex = VS_NONE;
    shader.mask.type.fragment = FS_SURFACE;
    shader.mask.base.content = mask->intel.drm.base.content;
    shader.mask.base.filter = i965_filter (CAIRO_FILTER_NEAREST);
    shader.mask.base.extend = i965_extend (CAIRO_EXTEND_NONE);

    cairo_matrix_init_translate (&shader.mask.base.matrix,
				 -extents->bounded.x,
				 -extents->bounded.y);
    cairo_matrix_scale (&shader.mask.base.matrix,
			1. / mask->intel.drm.width,
			1. / mask->intel.drm.height);

    shader.mask.base.bo = to_intel_bo (mask->intel.drm.bo);
    shader.mask.base.format = mask->intel.drm.format;
    shader.mask.base.width = mask->intel.drm.width;
    shader.mask.base.height = mask->intel.drm.height;
    shader.mask.base.stride = mask->intel.drm.stride;

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS ||
		status == CAIRO_INT_STATUS_UNSUPPORTED);

	if (clip_region != NULL &&
	    cairo_region_num_rectangles (clip_region) == 1)
	    clip_region = NULL;

	if (status == CAIRO_INT_STATUS_UNSUPPORTED)
	    i965_shader_set_clip (&shader, clip);
    }

    status = cairo_device_acquire (dst->intel.drm.base.device);
    if (unlikely (status))
	goto CLEANUP_SHADER;

    device = i965_device (dst);
    status = i965_shader_commit (&shader, device);
    if (unlikely (status))
	goto CLEANUP_DEVICE;

    if (clip_region != NULL) {
	unsigned int n, num_rectangles;

	num_rectangles = cairo_region_num_rectangles (clip_region);
	for (n = 0; n < num_rectangles; n++) {
	    cairo_rectangle_int_t rect;

	    cairo_region_get_rectangle (clip_region, n, &rect);

	    i965_shader_add_rectangle (&shader,
				       rect.x, rect.y,
				       rect.width, rect.height);
	}
    } else {
	i965_shader_add_rectangle (&shader,
				   extents->bounded.x,
				   extents->bounded.y,
				   extents->bounded.width,
				   extents->bounded.height);
    }

    if (! extents->is_bounded)
	status = i965_fixup_unbounded (dst, extents, clip);

  CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
  CLEANUP_SHADER:
    i965_shader_fini (&shader);

    return status;
}