static void vmw_swc_destroy(struct svga_winsys_context *swc) { struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc); unsigned i; for(i = 0; i < vswc->surface.used; ++i) { struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i]; if (isurf->referenced) p_atomic_dec(&isurf->vsurf->validated); vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL); } for(i = 0; i < vswc->shader.used; ++i) { struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i]; if (ishader->referenced) p_atomic_dec(&ishader->vshader->validated); vmw_svga_winsys_shader_reference(&ishader->vshader, NULL); } util_hash_table_destroy(vswc->hash); pb_validate_destroy(vswc->validate); vmw_ioctl_context_destroy(vswc->vws, swc->cid); #ifdef DEBUG debug_flush_ctx_destroy(vswc->fctx); #endif FREE(vswc); }
/*
 * vmw_swc_region_relocation - Stage a guest-memory-region relocation.
 *
 * Records where in the command stream the guest pointer must be patched
 * after validation, and requests a preemptive flush once the amount of
 * referenced GMR memory grows large.
 */
static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *rel;

   assert(vswc->region.staged < vswc->region.reserved);

   rel = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   rel->region.where = where;
   /* pb_validate already holds a refcount on the buffer; don't add another. */
   rel->buffer = vmw_pb_buffer(buffer);
   rel->offset = offset;
   rel->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, rel->buffer, flags)) {
      vswc->seen_regions += rel->buffer->size;
      /* Flush early once a fifth of the GMR pool is referenced. */
      if (vswc->seen_regions >= VMW_GMR_POOL_SIZE / 5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}
/*
 * vmw_swc_commit - Commit the currently reserved command space and all
 * staged relocations, making them part of the pending batch.
 */
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   /* Command buffer: the whole reservation becomes used. */
   assert(vswc->command.reserved);
   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   /* Surface relocations: only the staged portion becomes used. */
   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = vswc->surface.reserved = 0;

   /* Shader relocations. */
   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = vswc->shader.reserved = 0;

   /* Region (GMR/MOB) relocations. */
   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = vswc->region.reserved = 0;
}
/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Releases the shader structure together with the buffer holding the
 * shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *ctx = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&ctx->vws->base, shader);
}
/** * vmw_swc_surface_clear_reference - Clear referenced info for a surface * * @swc: Pointer to an svga_winsys_context * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which * we want to clear * * This is primarily used by a discard surface map to indicate that the * surface data is no longer referenced by a draw call, and mapping it * should therefore no longer cause a flush. */ void vmw_swc_surface_clear_reference(struct svga_winsys_context *swc, struct vmw_svga_winsys_surface *vsurf) { struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc); struct vmw_ctx_validate_item *isrf = util_hash_table_get(vswc->hash, vsurf); if (isrf && isrf->referenced) { isrf->referenced = FALSE; p_atomic_dec(&vsurf->validated); } }
/*
 * vmw_swc_shader_relocation - Stage a shader relocation.
 *
 * @swc: The winsys context.
 * @shid: Where to write the shader id (optional, may be NULL).
 * @mobid: Where to write the mob id backing the shader code.
 * @offset: Where to write the offset into the mob.
 * @shader: The winsys shader, or NULL for "no shader".
 * @flags: SVGA_RELOC_* flags.
 *
 * For non-vgpu10 contexts the shader is also tracked in the context's
 * validation list (hashed to avoid duplicate entries). If the shader has
 * a backing buffer, a mob relocation is staged for it.
 */
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      /*
       * Bug fix: the tail of this function treats @shid as optional
       * ("if (shid) ..."), but this early-out used to dereference it
       * unconditionally. Guard the write consistently.
       */
      if (shid)
         *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used +
                                       vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;

         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return code.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}
/*
 * vmw_swc_reserve - Reserve command-buffer space and relocation slots.
 *
 * Returns a pointer into the command buffer where @nr_bytes may be
 * written, after also reserving @nr_relocs slots in each relocation
 * table. Returns NULL when a flush is required first (out of space, or
 * a preemptive flush was requested).
 */
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Catch callers that ignored a previous NULL return and never flushed. */
   if (vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   /* A single reservation larger than the whole buffer can never succeed. */
   assert(nr_bytes <= vswc->command.size);
   if (nr_bytes > vswc->command.size)
      return NULL;

   if (vswc->preemptive_flush ||
       vswc->command.used + nr_bytes > vswc->command.size ||
       vswc->surface.used + nr_relocs > vswc->surface.size ||
       vswc->shader.used + nr_relocs > vswc->shader.size ||
       vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;

   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;

   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;

   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}
/*
 * vmw_swc_mob_relocation - Stage a mob (memory object) relocation.
 *
 * When @id is non-NULL a relocation entry is staged so the mob id and
 * offset can be patched after validation. The backing buffer is always
 * added to the validation list, and a preemptive flush is requested
 * when too much mob memory is referenced (only if the hint flag allows
 * pre-flushing).
 */
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pb_buffer *pb_buf = vmw_pb_buffer(buffer);

   if (id != NULL) {
      struct vmw_buffer_relocation *rel;

      assert(vswc->region.staged < vswc->region.reserved);

      rel = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      rel->mob.id = id;
      rel->mob.offset_into_mob = offset_into_mob;
      /* pb_validate holds a refcount on the buffer; no extra ref needed. */
      rel->buffer = pb_buf;
      rel->offset = offset;
      rel->is_mob = TRUE;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buf, flags)) {
      vswc->seen_mobs += pb_buf->size;

      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_mobs >=
          vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}
/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback.
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode.
 * @bytecodeLen: The length of the bytecode.
 *
 * Allocates an svga_winsys_gb_shader structure plus a buffer for the
 * shader code and copies the code into the buffer. Creation of the
 * shader resource itself is not done here.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct svga_winsys_gb_shader *gb_shader =
      vmw_svga_winsys_shader_create(&vswc->vws->base, shaderType,
                                    bytecode, bytecodeLen);

   if (gb_shader == NULL)
      return NULL;

   /* Record the pre-allocated id on the newly created shader. */
   vmw_svga_winsys_shader(gb_shader)->shid = shaderId;

   return gb_shader;
}
/*
 * vmw_swc_surface_only_relocation - Stage a surface-id relocation.
 *
 * Ensures the surface is tracked in the context's validation list
 * (hashed so each surface is only staged once per batch), bumps the
 * surface's validated count for external references, and writes the
 * surface id to @where if requested. May request a preemptive flush
 * when too much surface memory is referenced.
 */
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *item;

   assert(vswc->surface.staged < vswc->surface.reserved);

   item = util_hash_table_get(vswc->hash, vsurf);
   if (!item) {
      item = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&item->vsurf, vsurf);
      item->referenced = FALSE;

      /*
       * A hash insertion failure merely falls back to unhashed behavior
       * (possible extra flushes), so the return code is ignored.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, item);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
          vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !item->referenced) {
      item->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where != NULL)
      *where = vsurf->sid;
}
/*
 * NOTE(review): This is a second definition of vmw_swc_mob_relocation —
 * another definition with the same name appears earlier in this file.
 * A translation unit containing both will not compile/link; one of the
 * two must be removed. The two versions differ: the other one checks
 * @id for NULL before staging the relocation and gates the preemptive
 * flush on SVGA_HINT_FLAG_CAN_PRE_FLUSH, while this one stages
 * unconditionally and divides max_mob_memory by a literal 5. Confirm
 * which version the project intends to keep before deleting.
 *
 * Stages a mob relocation entry, adds the buffer to the validation
 * list, and may request a preemptive flush when referenced mob memory
 * grows large.
 */
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->mob.id = id;
   reloc->mob.offset_into_mob = offset_into_mob;
   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = TRUE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_mobs += reloc->buffer->size;
      /* divide by 5, tested for best performance */
      if (vswc->seen_mobs >= vswc->vws->ioctl.max_mob_memory / 5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}
/*
 * vmw_swc_flush - Validate, patch and submit the pending command batch.
 *
 * @swc: The winsys context.
 * @pfence: If non-NULL, receives a reference to the fence for this
 *          submission (a fence is requested even for an empty batch).
 *
 * Validates all buffers on the validation list, applies the staged
 * region/mob relocations into the command buffer, submits it via
 * vmw_ioctl_command and fences the validation list. Regardless of the
 * validation outcome, all per-batch state (command space, surface /
 * shader / region items, hash table, seen_* accounting, preemptive
 * flush flag) is reset afterwards.
 *
 * Returns the result of pb_validate_validate(); on failure the batch is
 * dropped rather than submitted.
 */
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         /* Resolving a validated buffer to a guest pointer must succeed. */
         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               /* No offset destination: the offset must have been zero. */
               assert(ptr.offset == 0);
            }
         }
         else
            *reloc->region.where = ptr;
      }

      /* Submit even an empty batch when the caller wants a fence back. */
      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   /* Drop validation references on surfaces, both committed and staged. */
   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   /* Drop validation references on shaders, both committed and staged. */
   for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if(pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   /* Drop the local fence reference taken by vmw_ioctl_command. */
   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}