static void virgl_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer) { struct virgl_context *vctx = virgl_context(ctx); struct virgl_transfer *trans = virgl_transfer(transfer); struct virgl_texture *vtex = virgl_texture(transfer->resource); uint32_t l_stride; if (transfer->resource->target != PIPE_TEXTURE_3D && transfer->resource->target != PIPE_TEXTURE_CUBE && transfer->resource->target != PIPE_TEXTURE_1D_ARRAY && transfer->resource->target != PIPE_TEXTURE_2D_ARRAY && transfer->resource->target != PIPE_TEXTURE_CUBE_ARRAY) l_stride = 0; else l_stride = trans->base.layer_stride; if (trans->base.usage & PIPE_TRANSFER_WRITE) { if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) { struct virgl_screen *vs = virgl_screen(ctx->screen); vtex->base.clean = FALSE; vctx->num_transfers++; vs->vws->transfer_put(vs->vws, vtex->base.hw_res, &transfer->box, trans->base.stride, l_stride, trans->offset, transfer->level); } } if (trans->resolve_tmp) pipe_resource_reference((struct pipe_resource **)&trans->resolve_tmp, NULL); util_slab_free(&vctx->texture_transfer_pool, trans); }
/* Record an explicitly-flushed sub-range of a mapped buffer; the actual
 * upload happens at unmap time.
 */
static void virgl_buffer_transfer_flush_region(struct pipe_context *ctx,
                                               struct pipe_transfer *transfer,
                                               const struct pipe_box *box)
{
   struct virgl_transfer *trans = virgl_transfer(transfer);
   const int start = box->x;
   const int end = start + box->width;

   /*
    * FIXME: Suboptimal — the range is kept as a single interval, so e.g.
    *
    * glMapBufferRange(.., 0, 100, GL_MAP_FLUSH_EXPLICIT_BIT)
    * glFlushMappedBufferRange(.., 25, 30)
    * glFlushMappedBufferRange(.., 65, 70)
    *
    * ends up flushing 25 --> 70.
    */
   util_range_add(&trans->range, start, end);
}
static void virgl_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer) { struct virgl_context *vctx = virgl_context(ctx); struct virgl_transfer *trans = virgl_transfer(transfer); struct virgl_buffer *vbuf = virgl_buffer(transfer->resource); if (trans->base.usage & PIPE_TRANSFER_WRITE) { if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) { struct virgl_screen *vs = virgl_screen(ctx->screen); vbuf->base.clean = FALSE; vctx->num_transfers++; vs->vws->transfer_put(vs->vws, vbuf->base.hw_res, &transfer->box, trans->base.stride, trans->base.layer_stride, trans->offset, transfer->level); } } util_slab_free(&vctx->texture_transfer_pool, trans); }
static void virgl_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer) { struct virgl_context *vctx = virgl_context(ctx); struct virgl_transfer *trans = virgl_transfer(transfer); if (trans->base.usage & PIPE_TRANSFER_WRITE) { if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) { if (trans->range.end <= trans->range.start) { virgl_resource_destroy_transfer(&vctx->transfer_pool, trans); return; } transfer->box.x += trans->range.start; transfer->box.width = trans->range.end - trans->range.start; trans->offset = transfer->box.x; } virgl_transfer_queue_unmap(&vctx->queue, trans); } else virgl_resource_destroy_transfer(&vctx->transfer_pool, trans); }