/**
 * Destroy the fenced buffer manager.
 *
 * Blocks until every outstanding fenced buffer has been reaped, then tears
 * down the wrapped provider, the fence ops table, and the manager itself.
 */
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      /* Drop the mutex while yielding so whoever signals fences can make
       * progress; re-acquire it before touching manager state again. */
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      /* Reap signalled fences until none remain (NOTE(review): the TRUE
       * argument presumably requests a blocking wait — confirm against the
       * helper's definition). */
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /* Historically asserted no unfenced buffers remained; left disabled. */
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   /* Tear down in dependency order: provider first, then the fence ops,
    * finally the manager structure itself. */
   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
/**
 * Allocate a new buffer and wrap it in a fenced buffer.
 *
 * Returns a referenced pb_buffer on success, or NULL when allocation or
 * GPU storage creation fails.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *buf = CALLOC_STRUCT(fenced_buffer);

   if (!buf)
      return NULL;

   /* Fill in the pb_buffer base plus our own bookkeeping fields. */
   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;
   buf->size = size;
   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling,
    */
   if (fenced_buffer_create_gpu_storage_locked(fenced_mgr, buf,
                                               desc, TRUE) != PIPE_OK) {
      /*
       * Give up.
       */
      pipe_mutex_unlock(fenced_mgr->mutex);
      FREE(buf);
      return NULL;
   }

   assert(buf->buffer);

   /* Track the new buffer on the unfenced list until it is fenced. */
   LIST_ADDTAIL(&buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   pipe_mutex_unlock(fenced_mgr->mutex);

   return &buf->base;
}
/**
 * Flush the fenced buffer manager: reap any fences that have already
 * signalled, then forward the flush to the wrapped provider.
 */
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   /* Keep reaping until no more signalled fences are found. */
   for (;;) {
      if (!fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         break;
   }
   pipe_mutex_unlock(fenced_mgr->mutex);

   /* The provider is expected to implement flush; guard anyway. */
   assert(fenced_mgr->provider->flush);
   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
/**
 * Wrap a buffer in a fenced buffer.
 *
 * Attempts, in order: non-blocking GPU storage, CPU (malloc) storage, and
 * finally GPU storage with blocking/eviction.  Returns NULL on failure or
 * when the request exceeds the manager's maximum buffer size.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   /* Initialize the pb_buffer base (note the extra .base level vs. older
    * revisions of this code) and remember size/desc for storage creation. */
   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling,
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      /* NOTE(review): TRUE presumably allows the helper to wait/evict —
       * confirm against the helper's definition. */
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                    TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   /* Exactly one storage kind must have succeeded by now. */
   assert(fenced_buf->buffer || fenced_buf->data);

   /* Track the buffer as unfenced until its first submission. */
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}