/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fmgr = fenced_manager(mgr);
   struct fenced_buffer *fbuf;
   enum pipe_error result;

   fbuf = CALLOC_STRUCT(fenced_buffer);
   if (!fbuf)
      goto fail;

   /* Initialize the public pb_buffer part. */
   pipe_reference_init(&fbuf->base.reference, 1);
   fbuf->base.alignment = desc->alignment;
   fbuf->base.usage = desc->usage;
   fbuf->base.size = size;
   fbuf->size = size;

   fbuf->base.vtbl = &fenced_buffer_vtbl;
   fbuf->mgr = fmgr;

   pipe_mutex_lock(fmgr->mutex);

   /* Try to create the GPU storage without stalling. */
   result = fenced_buffer_create_gpu_storage_locked(fmgr, fbuf, desc, TRUE);
   if (result != PIPE_OK)
      goto fail_unlock;

   assert(fbuf->buffer);

   /* Keep the buffer on the unfenced list until a fence is attached. */
   LIST_ADDTAIL(&fbuf->head, &fmgr->unfenced);
   ++fmgr->num_unfenced;

   pipe_mutex_unlock(fmgr->mutex);

   return &fbuf->base;

fail_unlock:
   pipe_mutex_unlock(fmgr->mutex);
   FREE(fbuf);
fail:
   return NULL;
}
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fmgr = fenced_manager(mgr);
   struct fenced_buffer *fbuf;
   enum pipe_error result;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if (size > fmgr->max_buffer_size)
      goto fail;

   fbuf = CALLOC_STRUCT(fenced_buffer);
   if (!fbuf)
      goto fail;

   /* Initialize the public pb_buffer part. */
   pipe_reference_init(&fbuf->base.base.reference, 1);
   fbuf->base.base.alignment = desc->alignment;
   fbuf->base.base.usage = desc->usage;
   fbuf->base.base.size = size;
   fbuf->size = size;
   fbuf->desc = *desc;

   fbuf->base.vtbl = &fenced_buffer_vtbl;
   fbuf->mgr = fmgr;

   pipe_mutex_lock(fmgr->mutex);

   /* First choice: GPU storage, without stalling. */
   result = fenced_buffer_create_gpu_storage_locked(fmgr, fbuf, FALSE);

   /* Second choice: CPU memory, still avoiding a GPU stall. */
   if (result != PIPE_OK)
      result = fenced_buffer_create_cpu_storage_locked(fmgr, fbuf);

   /* Last resort: GPU storage, waiting for some to become available. */
   if (result != PIPE_OK)
      result = fenced_buffer_create_gpu_storage_locked(fmgr, fbuf, TRUE);

   /* All three attempts failed -- give up. */
   if (result != PIPE_OK)
      goto fail_unlock;

   assert(fbuf->buffer || fbuf->data);

   /* Keep the buffer on the unfenced list until a fence is attached. */
   LIST_ADDTAIL(&fbuf->head, &fmgr->unfenced);
   ++fmgr->num_unfenced;

   pipe_mutex_unlock(fmgr->mutex);

   return &fbuf->base;

fail_unlock:
   pipe_mutex_unlock(fmgr->mutex);
   FREE(fbuf);
fail:
   return NULL;
}
/**
 * Validate (or invalidate) the buffer for inclusion in a validation list.
 *
 * Passing vl == NULL invalidates: the buffer's validation state is cleared.
 * Otherwise the buffer's GPU storage is (re)created if needed, the CPU
 * shadow copy is uploaded, and the underlying pb_buffer is validated into
 * vl with the given GPU read/write flags.
 *
 * Returns PIPE_OK on success, PIPE_ERROR_RETRY if the buffer is already
 * pending on a different validation list, or the error from storage
 * creation / copy / pb_validate.
 */
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   /* Only GPU read/write usage flags are meaningful here; mask off the rest. */
   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      /* Validating a mapped buffer is unexpected; release builds handle it
       * below by keeping the CPU copy alive and printing a warning. */
      assert(!fenced_buf->mapcount);

      /* TRUE: allowed to wait (evict/stall) for GPU storage to appear. */
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      /* Upload the CPU shadow copy into the freshly created GPU storage;
       * on failure, undo the GPU allocation so state stays consistent. */
      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      /* The CPU storage can only be freed once no mapping points into it. */
      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   /* Delegate to the underlying buffer's validation. */
   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   /* Record which list and usage flags this buffer is now validated for. */
   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);
   return ret;
}