/** * Wait for the fence to expire, and remove it from the fenced list. * * This function will release and re-acquire the mutex, so any copy of mutable * state must be discarded after calling it. */ static inline enum pipe_error fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { struct pb_fence_ops *ops = fenced_mgr->ops; enum pipe_error ret = PIPE_ERROR; #if 0 debug_warning("waiting for GPU"); #endif assert(pipe_is_referenced(&fenced_buf->base.reference)); assert(fenced_buf->fence); if(fenced_buf->fence) { struct pipe_fence_handle *fence = NULL; int finished; boolean proceed; ops->fence_reference(ops, &fence, fenced_buf->fence); pipe_mutex_unlock(fenced_mgr->mutex); finished = ops->fence_finish(ops, fenced_buf->fence, 0); pipe_mutex_lock(fenced_mgr->mutex); assert(pipe_is_referenced(&fenced_buf->base.reference)); /* * Only proceed if the fence object didn't change in the meanwhile. * Otherwise assume the work has been already carried out by another * thread that re-aquired the lock before us. */ proceed = fence == fenced_buf->fence ? TRUE : FALSE; ops->fence_reference(ops, &fence, NULL); if(proceed && finished == 0) { /* * Remove from the fenced list */ boolean destroyed; destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf); /* TODO: remove consequents buffers with the same fence? */ assert(!destroyed); (void) destroyed; /* silence unused var warning for non-debug build */ fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE; ret = PIPE_OK; } } return ret; }
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Walks the fenced list in order, dropping every buffer whose fence has
 * expired, and stops at the first buffer whose fence is still pending.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct pipe_fence_handle *prev_fence = NULL;
   struct list_head *curr = fenced_mgr->fenced.next;
   struct list_head *next = curr->next;
   boolean removed_any = FALSE;

   while (curr != &fenced_mgr->fenced) {
      struct fenced_buffer *fenced_buf =
         LIST_ENTRY(struct fenced_buffer, curr, head);

      if (fenced_buf->fence == prev_fence) {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      } else {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Block at most once. After this, merely poll whether the
             * following buffers' fences have already expired, without
             * further waits.
             */
            wait = FALSE;
         } else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0)
            return removed_any;

         prev_fence = fenced_buf->fence;
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
      removed_any = TRUE;

      curr = next;
      next = curr->next;
   }

   return removed_any;
}
static void fenced_buffer_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence) { struct fenced_buffer *fenced_buf = fenced_buffer(buf); struct fenced_manager *fenced_mgr = fenced_buf->mgr; struct pb_fence_ops *ops = fenced_mgr->ops; pipe_mutex_lock(fenced_mgr->mutex); assert(pipe_is_referenced(&fenced_buf->base.base.reference)); assert(fenced_buf->buffer); if(fence != fenced_buf->fence) { assert(fenced_buf->vl); assert(fenced_buf->validation_flags); if (fenced_buf->fence) { boolean destroyed; destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf); assert(!destroyed); } if (fence) { ops->fence_reference(ops, &fenced_buf->fence, fence); fenced_buf->flags |= fenced_buf->validation_flags; fenced_buffer_add_locked(fenced_mgr, fenced_buf); } pb_fence(fenced_buf->buffer, fence); fenced_buf->vl = NULL; fenced_buf->validation_flags = 0; } pipe_mutex_unlock(fenced_mgr->mutex); }