static struct vl_dri3_buffer * dri3_get_back_buffer(struct vl_dri3_screen *scrn) { struct vl_dri3_buffer *buffer; struct pipe_resource *texture = NULL; assert(scrn); scrn->cur_back = dri3_find_back(scrn); if (scrn->cur_back < 0) return NULL; buffer = scrn->back_buffers[scrn->cur_back]; if (!buffer || buffer->width != scrn->width || buffer->height != scrn->height) { struct vl_dri3_buffer *new_buffer; new_buffer = dri3_alloc_back_buffer(scrn); if (!new_buffer) return NULL; if (buffer) dri3_free_back_buffer(scrn, buffer); vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]); buffer = new_buffer; scrn->back_buffers[scrn->cur_back] = buffer; } pipe_resource_reference(&texture, buffer->texture); xcb_flush(scrn->conn); xshmfence_await(buffer->shm_fence); return buffer; }
int loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw) { int back_id = LOADER_DRI3_BACK_ID(dri3_find_back(draw)); if (back_id < 0 || !draw->buffers[back_id]) return 0; if (draw->buffers[back_id]->last_swap != 0) return draw->send_sbc - draw->buffers[back_id]->last_swap + 1; else return 0; }
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary
 *
 * Returns the buffer for the slot selected by \p buffer_type (a back slot
 * chosen by dri3_find_back(), or LOADER_DRI3_FRONT_ID), reallocating it
 * when missing or sized differently from the drawable. Returns NULL if no
 * back slot is free or allocation fails. On resize the old contents are
 * copied into the new buffer before the old one is released.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   int buf_id;
   __DRIcontext *dri_context;

   dri_context = draw->vtable->get_dri_context(draw);

   if (buffer_type == loader_dri3_buffer_back) {
      buf_id = dri3_find_back(draw);

      /* No idle back buffer available right now. */
      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, or if that
    * old one is the wrong size
    */
   if (!buffer ||
       buffer->width != draw->width ||
       buffer->height != draw->height) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);

      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      switch (buffer_type) {
      case loader_dri3_buffer_back:
         if (buffer) {
            if (!buffer->linear_buffer) {
               /* Server-side copy: reset the new buffer's fence, wait until
                * the server is done with the old pixmap, copy, then trigger
                * the fence so later waiters see the copy complete.
                */
               dri3_fence_reset(draw->conn, new_buffer);
               dri3_fence_await(draw->conn, buffer);
               dri3_copy_area(draw->conn,
                              buffer->pixmap,
                              new_buffer->pixmap,
                              dri3_drawable_gc(draw),
                              0, 0, 0, 0,
                              draw->width, draw->height);
               dri3_fence_trigger(draw->conn, new_buffer);
            } else if (draw->vtable->in_current_context(draw)) {
               /* Linear (prime) buffer: blit GPU-side instead, but only if
                * a current context is available to issue the blit.
                */
               draw->ext->image->blitImage(dri_context,
                                           new_buffer->image,
                                           buffer->image,
                                           0, 0, draw->width, draw->height,
                                           0, 0, draw->width, draw->height, 0);
            }
            dri3_free_render_buffer(draw, buffer);
         }
         break;
      case loader_dri3_buffer_front:
         /* Seed the fake front with the real front's current contents. */
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         /* Prime case: mirror the linear copy into the tiled image once the
          * server-side copy above has completed.
          */
         if (new_buffer->linear_buffer && draw->vtable->in_current_context(draw)) {
            dri3_fence_await(draw->conn, new_buffer);
            draw->ext->image->blitImage(dri_context,
                                        new_buffer->image,
                                        new_buffer->linear_buffer,
                                        0, 0, draw->width, draw->height,
                                        0, 0, draw->width, draw->height, 0);
         }
         break;
      }
      buffer = new_buffer;
      buffer->buffer_type = buffer_type;
      draw->buffers[buf_id] = buffer;
   }
   /* Make sure the X server is done with this buffer before rendering. */
   dri3_fence_await(draw->conn, buffer);

   /* Return the requested buffer */
   return buffer;
}
/* Select the back buffer for the next frame.
 *
 * Without an output texture this is a simple size-checked reuse of the
 * current back slot. With an output texture set, the buffer backing that
 * texture is reused when possible: on a different GPU the linear buffer is
 * kept and only the external texture pointer is swapped in; on a single
 * GPU the back-buffer ring is searched for a non-busy buffer already
 * wrapping the texture, falling back to allocating into the next ring slot.
 * Returns NULL if no back slot is free or allocation fails.
 */
static struct vl_dri3_buffer *
dri3_get_back_buffer(struct vl_dri3_screen *scrn)
{
   struct vl_dri3_buffer *buffer;
   struct pipe_resource *texture = NULL;
   bool allocate_new_buffer = false;
   int b, id;

   assert(scrn);

   scrn->cur_back = dri3_find_back(scrn);
   if (scrn->cur_back < 0)
      return NULL;
   buffer = scrn->back_buffers[scrn->cur_back];

   if (scrn->output_texture) {
      /* Note: with an output texture the size check is "<", not "!=" — a
       * larger existing buffer is deliberately kept and reused. */
      if (!buffer || buffer->width < scrn->width ||
          buffer->height < scrn->height)
         allocate_new_buffer = true;
      else if (scrn->is_different_gpu)
         /* In case of different gpu we can reuse the linear
          * texture so we only need to set the external
          * texture for copying */
         /* NOTE(review): raw pointer assignment; the previous
          * buffer->texture reference is not visibly released here —
          * verify refcount handling against the buffer alloc/free paths. */
         buffer->texture = scrn->output_texture;
      else {
         /* In case of a single gpu we search if the texture is
          * already present as buffer if not we get the
          * handle and pixmap for the texture that is set */
         for (b = 0; b < BACK_BUFFER_NUM; b++) {
            id = (b + scrn->cur_back) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[id];
            if (buffer && !buffer->busy &&
                buffer->texture == scrn->output_texture) {
               scrn->cur_back = id;
               break;
            }
         }

         /* Loop ran to completion: no existing buffer wraps the texture,
          * so claim the next ring slot and allocate into it below. */
         if (b == BACK_BUFFER_NUM) {
            allocate_new_buffer = true;
            scrn->cur_back = scrn->next_back;
            scrn->next_back = (scrn->next_back + 1) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[scrn->cur_back];
         }
      }
   } else {
      /* No external texture: exact size match required. */
      if (!buffer || buffer->width != scrn->width ||
          buffer->height != scrn->height)
         allocate_new_buffer = true;
   }

   if (allocate_new_buffer) {
      struct vl_dri3_buffer *new_buffer;

      new_buffer = dri3_alloc_back_buffer(scrn);
      if (!new_buffer)
         return NULL;

      if (buffer)
         dri3_free_back_buffer(scrn, buffer);

      /* Dirty-area tracking only applies to internally-composited
       * buffers, not externally-provided output textures. */
      if (!scrn->output_texture)
         vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]);
      buffer = new_buffer;
      scrn->back_buffers[scrn->cur_back] = buffer;
   }

   /* NOTE(review): this takes a reference into a local that goes out of
    * scope without a matching release — presumably intentional to pin the
    * texture, but confirm against pipe_resource_reference conventions. */
   pipe_resource_reference(&texture, buffer->texture);
   /* Flush pending X requests, then wait until the server has released
    * this buffer before letting the caller render into it. */
   xcb_flush(scrn->conn);
   xshmfence_await(buffer->shm_fence);

   return buffer;
}