void i915_gem_context_free(struct kref *ctx_ref) { struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); int i; lockdep_assert_held(&ctx->i915->drm.struct_mutex); trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); i915_ppgtt_put(ctx->ppgtt); for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_context *ce = &ctx->engine[i]; if (!ce->state) continue; WARN_ON(ce->pin_count); if (ce->ring) intel_ring_free(ce->ring); __i915_gem_object_release_unless_active(ce->state->obj); } kfree(ctx->name); put_pid(ctx->pid); list_del(&ctx->link); ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); kfree(ctx); }
/*
 * i915_gem_render_state_emit - emit the "golden" render state into a request
 * @rq: the request to append the batch buffer dispatches to
 *
 * Creates a temporary internal object holding the per-engine render state
 * batch, pins it into the global GTT, and emits one (or, if auxiliary
 * state is present, two) secure batch-buffer starts into @rq.  The vma is
 * marked active against the request so the backing storage survives until
 * the GPU has consumed it, then all local references are dropped.
 *
 * Returns 0 on success (including the no-op case where the engine has no
 * render state), or a negative error code.
 */
int i915_gem_render_state_emit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_render_state so = {}; /* keep the compiler happy */
	int err;

	/* No golden state for this engine: nothing to emit. */
	so.rodata = render_state_get_rodata(engine);
	if (!so.rodata)
		return 0;

	/* The batch must fit within the single page we allocate below. */
	if (so.rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(so.obj))
		return PTR_ERR(so.obj);

	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(so.vma)) {
		err = PTR_ERR(so.vma);
		goto err_obj;
	}

	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err_vma;

	/* Copy the rodata batch (and any aux state) into the pinned object. */
	err = render_state_setup(&so, rq->i915);
	if (err)
		goto err_unpin;

	err = engine->emit_bb_start(rq,
				    so.batch_offset, so.batch_size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto err_unpin;

	/*
	 * Emit the auxiliary state as a second dispatch when present
	 * (sizes of 8 or less indicate an empty/terminator-only aux batch
	 * -- TODO confirm against render_state_setup).
	 */
	if (so.aux_size > 8) {
		err = engine->emit_bb_start(rq,
					    so.aux_offset, so.aux_size,
					    I915_DISPATCH_SECURE);
		if (err)
			goto err_unpin;
	}

	/*
	 * Success path deliberately falls through the cleanup labels:
	 * the vma is kept alive by the active reference taken here, so
	 * we can drop our own pin/close/object references immediately.
	 */
	err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
	i915_vma_unpin(so.vma);
err_vma:
	i915_vma_close(so.vma);
err_obj:
	__i915_gem_object_release_unless_active(so.obj);
	return err;
}
/*
 * Atomically take ownership of *@p_vma (clearing the caller's pointer),
 * then unpin and close the vma and drop the reference we held on its
 * backing object.  A NULL *@p_vma is a no-op.
 */
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma = fetch_and_zero(p_vma);
	struct drm_i915_gem_object *obj;

	if (!vma)
		return;

	/* Grab the object before the vma is torn down. */
	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}
/*
 * Release an engine's cached render state, if any.  Detaches the
 * render_state pointer from @engine (so repeated calls are safe),
 * closes its vma, releases the backing object and frees the tracking
 * structure.
 */
void i915_gem_render_state_fini(struct intel_engine_cs *engine)
{
	struct intel_render_state *so = fetch_and_zero(&engine->render_state);
	struct drm_i915_gem_object *obj;

	if (!so)
		return;

	/* Keep hold of the object across the vma teardown. */
	obj = so->vma->obj;

	i915_vma_close(so->vma);
	__i915_gem_object_release_unless_active(obj);

	kfree(so);
}
/** * i915_gem_batch_pool_fini() - clean up a batch buffer pool * @pool: the pool to clean up * * Note: Callers must hold the struct_mutex. */ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) { int n; lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { struct drm_i915_gem_object *obj, *next; list_for_each_entry_safe(obj, next, &pool->cache_list[n], batch_pool_link) __i915_gem_object_release_unless_active(obj); INIT_LIST_HEAD(&pool->cache_list[n]); } }