static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if (fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
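All of these callbacks sit behind the same generic entry point, which dispatches through the buffer's vtbl. A minimal sketch of that dispatcher, assuming the usual pb_vtbl convention these managers follow (the real pb_buffer.h version carries additional reference-count and bounds assertions):

/* Sketch of the generic dispatcher: each wrapper's callback recurses
 * down the chain of wrapping buffers, accumulating sub-allocation
 * offsets until a real storage buffer is reached. */
static inline void
pb_get_base_buffer(struct pb_buffer *buf,
                   struct pb_buffer **base_buf,
                   pb_size *offset)
{
   assert(buf);
   assert(buf->vtbl->get_base_buffer);
   buf->vtbl->get_base_buffer(buf, base_buf, offset);
}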
/* A cache buffer merely wraps another buffer, so delegate directly. */
static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}
/* The debug wrapper pads the underlying buffer with an underflow
 * guard band, so the usable data starts underflow_size bytes in. */
static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
   *offset += buf->underflow_size;
}
/* Pool buffers are fixed-size slots carved out of one large manager
 * buffer; add this slot's start offset. */
static void
pool_buffer_get_base_buffer(struct pb_buffer *buf,
                            struct pb_buffer **base_buf,
                            pb_size *offset)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   pb_get_base_buffer(pool->buffer, base_buf, offset);
   *offset += pool_buf->start;
}
/* Memory-manager buffers are variable-size blocks sub-allocated from
 * the manager's backing buffer; add the block's offset. */
static void
mm_buffer_get_base_buffer(struct pb_buffer *buf,
                          struct pb_buffer **base_buf,
                          pb_size *offset)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;
   pb_get_base_buffer(mm->buffer, base_buf, offset);
   *offset += mm_buf->block->ofs;
}
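Because each wrapper adds its own displacement on top of the recursive call, offsets compose across stacked managers. A hypothetical sketch, assuming a debug buffer wrapped around an mm sub-allocation (debug_buf and mm_buf here are illustrative):

/* Hypothetical composition: a debug wrapper around an mm sub-allocation. */
struct pb_buffer *base;
pb_size off;
pb_get_base_buffer(debug_buf, &base, &off);
/* base is now the mm manager's backing buffer, and
 * off == mm_buf->block->ofs + debug_buf->underflow_size. */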
/* On-demand buffers defer creating their real storage; force
 * instantiation before reporting a base buffer. */
static void
pb_ondemand_buffer_get_base_buffer(struct pb_buffer *_buf,
                                   struct pb_buffer **base_buf,
                                   pb_size *offset)
{
   struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);

   if (pb_ondemand_buffer_instantiate(buf) != PIPE_OK) {
      assert(0);
      *base_buf = &buf->base;
      *offset = 0;
      return;
   }

   pb_get_base_buffer(buf->buffer, base_buf, offset);
}
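Each manager exposes its callback through its pb_vtbl. A sketch of the wiring for the pool manager, assuming the member names of struct pb_vtbl (destroy, map, unmap, validate, fence, get_base_buffer) and that the sibling pool_buffer_* callbacks exist alongside it:

/* Sketch: hooking the callback into the buffer's vtable. */
static const struct pb_vtbl pool_buffer_vtbl = {
   .destroy = pool_buffer_destroy,
   .map = pool_buffer_map,
   .unmap = pool_buffer_unmap,
   .validate = pool_buffer_validate,
   .fence = pool_buffer_fence,
   .get_base_buffer = pool_buffer_get_base_buffer,
};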
/* Unwrap a pb_buffer to the underlying radeon_bo, if any. */
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
   struct radeon_bo *bo = NULL;

   if (_buf->vtbl == &radeon_bo_vtbl) {
      bo = radeon_bo(_buf);
   } else {
      struct pb_buffer *base_buf;
      pb_size offset;

      pb_get_base_buffer(_buf, &base_buf, &offset);

      if (base_buf->vtbl == &radeon_bo_vtbl)
         bo = radeon_bo(base_buf);
   }

   return bo;
}
/* Unwrap a pb_buffer to the underlying amdgpu_winsys_bo, if any. */
static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = NULL;

   if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
      bo = amdgpu_winsys_bo(_buf);
   } else {
      struct pb_buffer *base_buf;
      pb_size offset;

      pb_get_base_buffer(_buf, &base_buf, &offset);

      if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
         bo = amdgpu_winsys_bo(base_buf);
   }

   return bo;
}
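Both helpers perform only a single unwrapping step, checking the vtbl before and after it, so a buffer whose base buffer is still not a native BO yields NULL; callers must handle that case. A hypothetical usage sketch (emit_reloc_for_bo is illustrative, not a real winsys function):

/* Hypothetical caller: unwrap before emitting a relocation.
 * emit_reloc_for_bo() is illustrative only. */
static boolean add_buffer_reloc(struct pb_buffer *buf)
{
   struct radeon_bo *bo = get_radeon_bo(buf);
   if (!bo)
      return FALSE;   /* not backed by a native radeon BO */
   emit_reloc_for_bo(bo);
   return TRUE;
}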
/* Resolve a pb_buffer to an SVGA guest pointer: locate the GMR region
 * backing the base buffer and fold in the sub-allocation offset. */
boolean
vmw_gmr_bufmgr_region_ptr(struct pb_buffer *buf,
                          struct SVGAGuestPtr *ptr)
{
   struct pb_buffer *base_buf;
   pb_size offset = 0;
   struct vmw_gmr_buffer *gmr_buf;

   pb_get_base_buffer(buf, &base_buf, &offset);

   gmr_buf = vmw_gmr_buffer(base_buf);
   if (!gmr_buf)
      return FALSE;

   *ptr = vmw_ioctl_region_ptr(gmr_buf->region);
   ptr->offset += offset;

   return TRUE;
}
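The returned guest pointer identifies the GMR and the byte offset within it. A hypothetical caller sketch, assuming SVGAGuestPtr's gmrId/offset fields (fill_dma_cmd and the cmd variable are illustrative only):

/* Hypothetical usage; fill_dma_cmd() is illustrative only. */
struct SVGAGuestPtr ptr;
if (!vmw_gmr_bufmgr_region_ptr(buf, &ptr))
   return;                      /* buffer is not GMR-backed */
fill_dma_cmd(cmd, ptr.gmrId, ptr.offset);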