static struct vl_dri3_buffer * dri3_get_back_buffer(struct vl_dri3_screen *scrn) { struct vl_dri3_buffer *buffer; struct pipe_resource *texture = NULL; assert(scrn); scrn->cur_back = dri3_find_back(scrn); if (scrn->cur_back < 0) return NULL; buffer = scrn->back_buffers[scrn->cur_back]; if (!buffer || buffer->width != scrn->width || buffer->height != scrn->height) { struct vl_dri3_buffer *new_buffer; new_buffer = dri3_alloc_back_buffer(scrn); if (!new_buffer) return NULL; if (buffer) dri3_free_back_buffer(scrn, buffer); vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]); buffer = new_buffer; scrn->back_buffers[scrn->cur_back] = buffer; } pipe_resource_reference(&texture, buffer->texture); xcb_flush(scrn->conn); xshmfence_await(buffer->shm_fence); return buffer; }
/* Acquire the next presentable image by polling the X11 connection.
 *
 * Loops until one of the swapchain images is no longer busy (i.e. the X
 * server has released it), draining DRI3 Present special events in between.
 * `timeout` is in nanoseconds; UINT64_MAX means wait forever, 0 means do a
 * single non-blocking check.
 *
 * Returns VK_SUCCESS with *image_index set, VK_NOT_READY / VK_TIMEOUT on
 * expiry, or VK_ERROR_OUT_OF_DATE_KHR when the event stream breaks.
 */
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         /* Infinite wait: block directly on the special event queue. */
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return VK_ERROR_OUT_OF_DATE_KHR;
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return VK_NOT_READY;

            /* Remember the absolute deadline so the remaining timeout can
             * be recomputed after a spurious wakeup. */
            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;

            /* Bug fix: the original truncated the nanosecond timeout down
             * with `timeout / 1000 / 1000`, so any remaining timeout below
             * 1 ms became poll(..., 0) and the function returned VK_TIMEOUT
             * before the requested deadline had elapsed.  Round up instead,
             * and clamp to poll()'s int range. */
            uint64_t timeout_ms = (timeout + 999999) / 1000000;
            if (timeout_ms > 2147483647u)
               timeout_ms = 2147483647u;
            ret = poll(&pfds, 1, (int)timeout_ms);
            if (ret == 0)
               return VK_TIMEOUT;
            if (ret == -1)
               return VK_ERROR_OUT_OF_DATE_KHR;

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;

            continue;
         }
      }

      /* Process the Present event; it may mark an image non-busy, which the
       * next loop iteration will pick up. */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}
/* Acquire the next presentable image from the acquire thread's queue.
 *
 * Only valid for threaded swapchains: the present queue thread pushes
 * indices of images the server has released into chain->acquire_queue, and
 * this function blocks (up to `timeout`) pulling one of them.
 *
 * Returns VK_SUCCESS with *image_index_out set, the wsi_queue_pull error on
 * timeout/failure, or the swapchain's sticky error status if it went bad.
 */
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   uint32_t idx;
   VkResult pull_result;

   assert(chain->threaded);

   pull_result = wsi_queue_pull(&chain->acquire_queue, &idx, timeout);
   if (pull_result != VK_SUCCESS)
      return pull_result;
   if (chain->status != VK_SUCCESS)
      return chain->status;

   assert(idx < chain->image_count);

   /* Wait until the X server has signalled the image's fence before handing
    * it back to the application for rendering. */
   xshmfence_await(chain->images[idx].shm_fence);

   *image_index_out = idx;
   return VK_SUCCESS;
}
/* Block until the X server triggers BUFFER's shmfence, i.e. until it is
 * safe to touch the buffer again. */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   /* Flush first so any request that will eventually trigger the fence has
    * actually reached the server before we go to sleep on it. */
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
}
/* Pick and return the back buffer to use for the current frame.
 *
 * Two modes:
 *  - With scrn->output_texture set, the caller supplies the texture to
 *    present, so this function tries to reuse an existing back-buffer slot
 *    already bound to that texture (single-GPU case) or simply points the
 *    current buffer at it (different-GPU case).
 *  - Without an output texture, the buffer is (re)allocated whenever the
 *    slot is empty or its size no longer matches the drawable.
 *
 * In all cases the function flushes the connection and waits on the chosen
 * buffer's shmfence before returning, so the server is done reading from it.
 * Returns NULL when no slot is available or allocation fails.
 */
static struct vl_dri3_buffer *
dri3_get_back_buffer(struct vl_dri3_screen *scrn)
{
   struct vl_dri3_buffer *buffer;
   struct pipe_resource *texture = NULL;
   bool allocate_new_buffer = false;
   int b, id;

   assert(scrn);

   scrn->cur_back = dri3_find_back(scrn);
   if (scrn->cur_back < 0)
      return NULL;
   buffer = scrn->back_buffers[scrn->cur_back];

   if (scrn->output_texture) {
      /* Slot must be at least as large as the drawable; smaller means we
       * have to allocate a new backing buffer. */
      if (!buffer || buffer->width < scrn->width ||
          buffer->height < scrn->height)
         allocate_new_buffer = true;
      else if (scrn->is_different_gpu)
         /* In case of different gpu we can reuse the linear
          * texture so we only need to set the external
          * texture for copying */
         /* NOTE(review): this assigns the pointer without taking a
          * pipe_resource reference or releasing the previous texture —
          * presumably the caller keeps output_texture alive; verify. */
         buffer->texture = scrn->output_texture;
      else {
         /* In case of a single gpu we search if the texture is
          * already present as buffer if not we get the
          * handle and pixmap for the texture that is set */
         /* Search all slots starting from cur_back for an idle buffer
          * already bound to the output texture. */
         for (b = 0; b < BACK_BUFFER_NUM; b++) {
            id = (b + scrn->cur_back) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[id];
            if (buffer && !buffer->busy &&
                buffer->texture == scrn->output_texture) {
               scrn->cur_back = id;
               break;
            }
         }
         if (b == BACK_BUFFER_NUM) {
            /* No matching slot: rotate to the next slot and allocate. */
            allocate_new_buffer = true;
            scrn->cur_back = scrn->next_back;
            scrn->next_back = (scrn->next_back + 1) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[scrn->cur_back];
         }
      }
   } else {
      /* No external texture: require an exact size match. */
      if (!buffer || buffer->width != scrn->width ||
          buffer->height != scrn->height)
         allocate_new_buffer = true;
   }

   if (allocate_new_buffer) {
      struct vl_dri3_buffer *new_buffer;

      new_buffer = dri3_alloc_back_buffer(scrn);
      if (!new_buffer)
         return NULL;
      if (buffer)
         dri3_free_back_buffer(scrn, buffer);
      /* Only the internally-composited path tracks dirty areas. */
      if (!scrn->output_texture)
         vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]);
      buffer = new_buffer;
      scrn->back_buffers[scrn->cur_back] = buffer;
   }

   /* NOTE(review): this takes a reference on buffer->texture into a local
    * that is discarded when the function returns `buffer`, which looks like
    * a per-call texture reference leak — confirm against
    * pipe_resource_reference semantics. */
   pipe_resource_reference(&texture, buffer->texture);
   xcb_flush(scrn->conn);
   /* Block until the server is done presenting from this buffer. */
   xshmfence_await(buffer->shm_fence);

   return buffer;
}