/**
 * Flush the command parser and execute the commands.  When the parser buffer
 * is empty, the callback is not invoked.
 */
void
ilo_cp_flush_internal(struct ilo_cp *cp)
{
   int err;

   ilo_cp_set_owner(cp, NULL, 0);

   /* sanity check */
   assert(cp->bo_size == cp->size + cp->stolen + ilo_cp_private);

   if (!cp->used) {
      /* return the stolen space, etc. */
      ilo_cp_clear_buffer(cp);
      return;
   }

   ilo_cp_end_buffer(cp);

   /* upload and execute */
   err = ilo_cp_upload_buffer(cp);
   if (likely(!err))
      err = ilo_cp_exec_bo(cp);

   /* invoke the callback only on a successful flush */
   if (likely(!err && cp->flush_callback))
      cp->flush_callback(cp, cp->flush_callback_data);

   ilo_cp_clear_buffer(cp);
   ilo_cp_realloc_bo(cp);
}
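/*
 * Illustrative sketch, not driver code: the flush-callback hand-off used
 * above, reduced to a standalone example.  All example_* names are
 * hypothetical; the excerpt only shows that flush_callback is invoked with
 * flush_callback_data after a successful, non-empty flush, which a caller
 * can use e.g. to mark its cached hardware state dirty.
 */
#include <stdbool.h>

struct example_cp {
   unsigned used;
   void (*flush_callback)(struct example_cp *cp, void *data);
   void *flush_callback_data;
};

struct example_context {
   bool hw_state_dirty;
};

/* invoked only after a successful, non-empty flush */
static void
example_on_flush(struct example_cp *cp, void *data)
{
   struct example_context *ctx = data;

   (void) cp;

   /* the emitted state now lives in a submitted batch; re-emit next time */
   ctx->hw_state_dirty = true;
}

/* hypothetical registration, e.g. at context creation */
static void
example_set_flush_callback(struct example_cp *cp, struct example_context *ctx)
{
   cp->flush_callback = example_on_flush;
   cp->flush_callback_data = ctx;
}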
/**
 * End the current batch: emit MI_BATCH_BUFFER_END and return the batch bo,
 * or NULL when the batch is empty or could not be finalized.
 */
static struct intel_bo *
ilo_cp_end_batch(struct ilo_cp *cp, unsigned *used)
{
   struct intel_bo *bo;

   ilo_cp_set_owner(cp, NULL, 0);

   if (!ilo_builder_batch_used(&cp->builder)) {
      ilo_builder_batch_discard(&cp->builder);
      return NULL;
   }

   /* see ilo_cp_space() */
   assert(ilo_builder_batch_space(&cp->builder) >= 2);

   ilo_builder_batch_mi_batch_buffer_end(&cp->builder);

   bo = ilo_builder_end(&cp->builder, used);

   /* we have to assume that kernel uploads also failed */
   if (!bo)
      ilo_shader_cache_invalidate(cp->shader_cache);

   return bo;
}
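/*
 * Illustrative sketch, not driver code: why the assert above reserves two
 * dwords.  MI_BATCH_BUFFER_END is a single dword, and the kernel requires
 * the batch length to be qword-aligned, so up to one MI_NOOP of padding may
 * follow it.  The example_* builder below is a hypothetical reduction; the
 * MI encodings follow the i915 MI_INSTR layout (opcode in bits 28:23).
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_MI_NOOP             0x00000000u
#define EXAMPLE_MI_BATCH_BUFFER_END (0x0au << 23)

struct example_batch {
   uint32_t *dw;           /* command dwords */
   size_t used;            /* dwords written so far */
   size_t size;            /* total dwords available */
};

static void
example_batch_end(struct example_batch *b)
{
   /* the two-dword reservation the assert above guarantees */
   assert(b->size - b->used >= 2);

   b->dw[b->used++] = EXAMPLE_MI_BATCH_BUFFER_END;

   /* pad to an even dword count so the batch length is qword-aligned */
   if (b->used & 1)
      b->dw[b->used++] = EXAMPLE_MI_NOOP;
}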
static void
ilo_draw_set_owner(struct ilo_context *ilo)
{
   /* claim the render ring on behalf of the draw module */
   ilo_cp_set_owner(ilo->cp, INTEL_RING_RENDER, &ilo->draw.cp_owner);
}
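/*
 * Illustrative sketch, not driver code: the ownership hand-off behind
 * ilo_cp_set_owner().  The fields of struct ilo_cp_owner are not visible in
 * this excerpt, so everything below is a hypothetical reduction: an owner
 * provides a release hook that runs before another owner (or none) takes
 * over the parser, giving it a chance to emit any final commands.
 */
struct example_owned_cp;

struct example_owner {
   void (*release)(struct example_owned_cp *cp, void *data);
   void *release_data;
};

struct example_owned_cp {
   const struct example_owner *owner;
};

static void
example_cp_set_owner(struct example_owned_cp *cp,
                     const struct example_owner *owner)
{
   /* notify the previous owner before the hand-off */
   if (cp->owner && cp->owner != owner)
      cp->owner->release(cp, cp->owner->release_data);

   cp->owner = owner;
}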