/*
 * Tear down a spinner: terminate the running batch first, then release
 * the pinned CPU mappings and object references for both the HWS page
 * and the batch object.
 */
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);
}
static void hang_fini(struct hang *h) { *h->batch = MI_BATCH_BUFFER_END; i915_gem_chipset_flush(h->i915); i915_gem_object_unpin_map(h->obj); i915_gem_object_put(h->obj); i915_gem_object_unpin_map(h->hws); i915_gem_object_put(h->hws); kernel_context_close(h->ctx); igt_flush_test(h->i915, I915_WAIT_LOCKED); }
/*
 * Drop the CPU mapping of the GuC log buffer object.
 *
 * Caller must hold log->relay.lock (asserted below). The cached relay
 * buffer address is cleared so no stale pointer outlives the unmap.
 */
static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}
/*
 * dma-buf vunmap callback: release the kernel mapping of the exported
 * object. The vaddr argument is part of the dma-buf ops signature but is
 * not needed here — unpinning the map suffices.
 */
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct mutex *lock = &obj->base.dev->struct_mutex;

	mutex_lock(lock);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(lock);
}
/*
 * Initialise the hang fixture: create a kernel context plus two internal
 * objects (a hardware status page and a batch page) and pin CPU mappings
 * for both. On failure, every resource acquired so far is released in
 * reverse order via the goto-cleanup chain below.
 *
 * Returns 0 on success or a negative errno.
 */
static int hang_init(struct hang *h, struct drm_i915_private *i915)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->i915 = i915;

	h->ctx = kernel_context(i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	/* Pre-poison the status page so a written seqno is distinguishable. */
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	/* Batch mapping type depends on platform coherency (WB vs WC). */
	vaddr = i915_gem_object_pin_map(h->obj, i915_coherent_map_type(i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) { unsigned int mode; void *vaddr; int err; GEM_BUG_ON(INTEL_GEN(i915) < 8); memset(spin, 0, sizeof(*spin)); spin->i915 = i915; spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(spin->hws)) { err = PTR_ERR(spin->hws); goto err; } spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(spin->obj)) { err = PTR_ERR(spin->obj); goto err_hws; } i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_obj; } spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); mode = i915_coherent_map_type(i915); vaddr = i915_gem_object_pin_map(spin->obj, mode); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_unpin_hws; } spin->batch = vaddr; return 0; err_unpin_hws: i915_gem_object_unpin_map(spin->hws); err_obj: i915_gem_object_put(spin->obj); err_hws: i915_gem_object_put(spin->hws); err: return err; }
/*
 * Read one u32 from @obj at byte @offset through a write-combined CPU
 * mapping, storing the value in *@v. Moves the object to the WC domain
 * for reading first. Returns 0 on success or a negative errno.
 */
static int wc_get(struct drm_i915_gem_object *obj, unsigned long offset, u32 *v)
{
	u32 *vaddr;
	int ret;

	ret = i915_gem_object_set_to_wc_domain(obj, false);
	if (ret)
		return ret;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	*v = vaddr[offset / sizeof(*vaddr)];

	i915_gem_object_unpin_map(obj);
	return 0;
}
/*
 * Build a new hanging request on @engine using the fixture's context.
 *
 * If the current batch object is still active on the GPU, it cannot be
 * rewritten in place: a fresh internal object is allocated and mapped,
 * and the fixture's batch is swapped over to it (dropping our mapping
 * and reference on the old one; any in-flight request keeps it alive).
 *
 * Returns the new request, or an ERR_PTR on failure.
 */
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err;

	if (i915_gem_object_is_active(h->obj)) {
		struct drm_i915_gem_object *obj;
		void *vaddr;

		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		vaddr = i915_gem_object_pin_map(obj,
						i915_coherent_map_type(h->i915));
		if (IS_ERR(vaddr)) {
			i915_gem_object_put(obj);
			return ERR_CAST(vaddr);
		}

		/* Swap to the fresh batch; the old one stays alive while busy. */
		i915_gem_object_unpin_map(h->obj);
		i915_gem_object_put(h->obj);

		h->obj = obj;
		h->batch = vaddr;
	}

	rq = i915_request_alloc(engine, h->ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(h, rq);
	if (err) {
		/* Flush the half-built request before reporting the error. */
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
/*
 * Selftest: import a mock dma-buf into i915 and sanity-check the result —
 * the object must belong to our device and have the expected size, and its
 * vmap must alias the exporter's. The coherency loop is compiled out
 * (if (0)) until dmabuf mapping of imported objects is supported.
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_dmabuf_import(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *obj_map, *dma_map;
	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
	int err, i;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_dmabuf;
	}

	if (obj->base.dev != &i915->drm) {
		pr_err("i915_gem_prime_import created a non-i915 object!\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (obj->base.size != PAGE_SIZE) {
		pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
		       (long long)obj->base.size, PAGE_SIZE);
		err = -EINVAL;
		goto out_obj;
	}

	dma_map = dma_buf_vmap(dmabuf);
	if (!dma_map) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out_obj;
	}

	if (0) { /* Can not yet map dmabuf */
		obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(obj_map)) {
			err = PTR_ERR(obj_map);
			pr_err("i915_gem_object_pin_map failed with err=%d\n",
			       err);
			goto out_dma_map;
		}

		/* Writes through the exporter's map must be visible in ours. */
		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(dma_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("imported vmap not all set to %x!\n",
				       pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		/* ...and vice versa: our writes visible through the exporter's. */
		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(obj_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("exported vmap not all set to %x!\n",
				       pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		i915_gem_object_unpin_map(obj);
	}

	err = 0;
out_dma_map:
	dma_buf_vunmap(dmabuf, dma_map);
out_obj:
	i915_gem_object_put(obj);
out_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
/*
 * Selftest: export a 2-page object as a dma-buf and exercise the kmap
 * interface page by page — fresh pages read as zero, writes through
 * either the object map or the kmap are visible through the other, data
 * persists across kunmap/kmap, and kmap beyond the object bounds fails.
 *
 * Note: the object reference is dropped right after export; the dma-buf
 * holds its own reference, keeping @obj valid until dma_buf_put().
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	/* Drop our reference; the dma-buf now keeps the object alive. */
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialiased to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	/* Write page[1] through the object's own mapping... */
	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	/* ...and verify it is visible through the dma-buf kmap. */
	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	/* page[0] must have retained the value written before kunmap. */
	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	/* Out-of-range pages must not be mappable. */
	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}