/** * Allocate DMA'ble or Updatable storage for the buffer. * * Called before mapping a buffer. */ enum pipe_error svga_buffer_create_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf) { assert(!sbuf->user); if (ss->sws->have_gb_objects) { assert(sbuf->handle || !sbuf->dma.pending); return svga_buffer_create_host_surface(ss, sbuf); } if (!sbuf->hwbuf) { struct svga_winsys_screen *sws = ss->sws; unsigned alignment = 16; unsigned usage = 0; unsigned size = sbuf->b.b.width0; sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size); if (!sbuf->hwbuf) return PIPE_ERROR_OUT_OF_MEMORY; assert(!sbuf->dma.pending); } return PIPE_OK; }
static struct pipe_buffer * svga_buffer_create(struct pipe_screen *screen, unsigned alignment, unsigned usage, unsigned size) { struct svga_screen *ss = svga_screen(screen); struct svga_buffer *sbuf; assert(size); assert(alignment); sbuf = CALLOC_STRUCT(svga_buffer); if(!sbuf) goto error1; sbuf->magic = SVGA_BUFFER_MAGIC; pipe_reference_init(&sbuf->base.reference, 1); sbuf->base.screen = screen; sbuf->base.alignment = alignment; sbuf->base.usage = usage; sbuf->base.size = size; if(svga_buffer_needs_hw_storage(usage)) { if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK) goto error2; } else { if(alignment < sizeof(void*)) alignment = sizeof(void*); usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE; sbuf->swbuf = align_malloc(size, alignment); if(!sbuf->swbuf) goto error2; } return &sbuf->base; error2: FREE(sbuf); error1: return NULL; }
/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 *
 * If the buffer has dirty ranges and no DMA upload is pending, this
 * also queues the upload command (retrying once after a flush on
 * out-of-memory, and falling back to a piecewise upload when the
 * buffer does not fit in the GMR aperture at once).
 *
 * Returns NULL if buf is NULL or the host surface cannot be created.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /* Must not be mapped, and user buffers never get a handle. */
   assert(!sbuf->map.count);
   assert(!sbuf->user);

   if (!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.  On OOM, flush to free command buffer
             * space and retry once; the retry is expected to succeed.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               /* Track the buffer on the context's dirty list until the
                * DMA completes.
                */
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it in
             * smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already.  Make sure it is from this
          * context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   /* Invariant: any remaining dirty ranges have an upload in flight. */
   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}