static void vl_dri3_screen_destroy(struct vl_screen *vscreen) { struct vl_dri3_screen *scrn = (struct vl_dri3_screen *)vscreen; int i; assert(vscreen); dri3_flush_present_events(scrn); if (scrn->front_buffer) { dri3_free_front_buffer(scrn, scrn->front_buffer); scrn->front_buffer = NULL; return; } for (i = 0; i < BACK_BUFFER_NUM; ++i) { if (scrn->back_buffers[i]) { dri3_free_back_buffer(scrn, scrn->back_buffers[i]); scrn->back_buffers[i] = NULL; } } if (scrn->special_event) xcb_unregister_for_special_event(scrn->conn, scrn->special_event); scrn->base.pscreen->destroy(scrn->base.pscreen); pipe_loader_release(&scrn->base.dev, 1); FREE(scrn); return; }
static struct vl_dri3_buffer * dri3_get_back_buffer(struct vl_dri3_screen *scrn) { struct vl_dri3_buffer *buffer; struct pipe_resource *texture = NULL; assert(scrn); scrn->cur_back = dri3_find_back(scrn); if (scrn->cur_back < 0) return NULL; buffer = scrn->back_buffers[scrn->cur_back]; if (!buffer || buffer->width != scrn->width || buffer->height != scrn->height) { struct vl_dri3_buffer *new_buffer; new_buffer = dri3_alloc_back_buffer(scrn); if (!new_buffer) return NULL; if (buffer) dri3_free_back_buffer(scrn, buffer); vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]); buffer = new_buffer; scrn->back_buffers[scrn->cur_back] = buffer; } pipe_resource_reference(&texture, buffer->texture); xcb_flush(scrn->conn); xshmfence_await(buffer->shm_fence); return buffer; }
static void vl_dri3_screen_destroy(struct vl_screen *vscreen) { struct vl_dri3_screen *scrn = (struct vl_dri3_screen *)vscreen; int i; assert(vscreen); dri3_flush_present_events(scrn); if (scrn->front_buffer) { dri3_free_front_buffer(scrn, scrn->front_buffer); scrn->front_buffer = NULL; } for (i = 0; i < BACK_BUFFER_NUM; ++i) { if (scrn->back_buffers[i]) { dri3_free_back_buffer(scrn, scrn->back_buffers[i]); scrn->back_buffers[i] = NULL; } } if (scrn->special_event) { xcb_void_cookie_t cookie = xcb_present_select_input_checked(scrn->conn, scrn->eid, scrn->drawable, XCB_PRESENT_EVENT_MASK_NO_EVENT); xcb_discard_reply(scrn->conn, cookie.sequence); xcb_unregister_for_special_event(scrn->conn, scrn->special_event); } scrn->pipe->destroy(scrn->pipe); scrn->base.pscreen->destroy(scrn->base.pscreen); pipe_loader_release(&scrn->base.dev, 1); FREE(scrn); return; }
/* Select (and if necessary (re)allocate) the back buffer to render into.
 *
 * Two modes, depending on scrn->output_texture:
 *  - unset: plain swapchain behavior — reuse the slot returned by
 *    dri3_find_back() unless its size no longer matches the drawable.
 *  - set:   an external texture is to be presented; either reuse a
 *    buffer already wrapping that texture, attach it directly
 *    (different-GPU case), or allocate a new wrapping buffer.
 *
 * Returns the chosen buffer with its shm fence awaited (server done
 * with it), or NULL when no slot is free or allocation fails.
 */
static struct vl_dri3_buffer *
dri3_get_back_buffer(struct vl_dri3_screen *scrn)
{
   struct vl_dri3_buffer *buffer;
   struct pipe_resource *texture = NULL;
   bool allocate_new_buffer = false;
   int b, id;

   assert(scrn);

   scrn->cur_back = dri3_find_back(scrn);
   if (scrn->cur_back < 0)
      return NULL;
   buffer = scrn->back_buffers[scrn->cur_back];

   if (scrn->output_texture) {
      /* Note the '<' here (vs '!=' below): a larger existing buffer is
       * acceptable when wrapping an external output texture. */
      if (!buffer || buffer->width < scrn->width ||
          buffer->height < scrn->height)
         allocate_new_buffer = true;
      else if (scrn->is_different_gpu)
         /* In case of different gpu we can reuse the linear
          * texture so we only need to set the external
          * texture for copying */
         buffer->texture = scrn->output_texture;
      else {
         /* In case of a single gpu we search if the texture is
          * already present as buffer if not we get the
          * handle and pixmap for the texture that is set */
         for (b = 0; b < BACK_BUFFER_NUM; b++) {
            id = (b + scrn->cur_back) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[id];
            if (buffer && !buffer->busy &&
                buffer->texture == scrn->output_texture) {
               scrn->cur_back = id;
               break;
            }
         }
         /* No idle buffer wraps this texture: allocate one in the next
          * round-robin slot (next_back rotates independently of
          * dri3_find_back's choice). */
         if (b == BACK_BUFFER_NUM) {
            allocate_new_buffer = true;
            scrn->cur_back = scrn->next_back;
            scrn->next_back = (scrn->next_back + 1) % BACK_BUFFER_NUM;
            buffer = scrn->back_buffers[scrn->cur_back];
         }
      }
   } else {
      /* Internal rendering path: size must match exactly. */
      if (!buffer || buffer->width != scrn->width ||
          buffer->height != scrn->height)
         allocate_new_buffer = true;
   }

   if (allocate_new_buffer) {
      struct vl_dri3_buffer *new_buffer;

      new_buffer = dri3_alloc_back_buffer(scrn);
      if (!new_buffer)
         return NULL;
      if (buffer)
         dri3_free_back_buffer(scrn, buffer);

      /* Dirty-area tracking only applies to internally composited
       * buffers, not external output textures. */
      if (!scrn->output_texture)
         vl_compositor_reset_dirty_area(&scrn->dirty_areas[scrn->cur_back]);
      buffer = new_buffer;
      scrn->back_buffers[scrn->cur_back] = buffer;
   }

   /* Hold a reference on the texture, flush so the server processes any
    * pending release, then wait on the fence before handing it out. */
   pipe_resource_reference(&texture, buffer->texture);
   xcb_flush(scrn->conn);
   xshmfence_await(buffer->shm_fence);

   return buffer;
}