static void r600_buffer_do_flush_region(struct pipe_context *ctx,
                                        struct pipe_transfer *transfer,
                                        const struct pipe_box *box)
{
   struct r600_common_context *rctx = (struct r600_common_context*)ctx;
   struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
   struct r600_resource *rbuffer = r600_resource(transfer->resource);

   if (rtransfer->staging) {
      struct pipe_resource *dst, *src;
      unsigned soffset;
      struct pipe_box dma_box;

      dst = transfer->resource;
      src = &rtransfer->staging->b.b;
      soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

      u_box_1d(soffset, box->width, &dma_box);

      /* Copy the staging buffer into the original one. */
      rctx->dma_copy(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
   }

   util_range_add(&rbuffer->valid_buffer_range, box->x,
                  box->x + box->width);
}
void u_default_buffer_subdata(struct pipe_context *pipe,
                              struct pipe_resource *resource,
                              unsigned usage, unsigned offset,
                              unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   /* buffer_subdata implicitly discards the rewritten buffer range */
   if (offset == 0 && size == resource->width0) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   pipe_transfer_unmap(pipe, transfer);
}
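/*
 * Usage note (illustrative sketch, not from the original source): a Gallium
 * driver that needs no specialized upload path can simply install the helper
 * above as its pipe_context::buffer_subdata hook at context-creation time.
 * The driver name and context struct below ("mydrv") are hypothetical.
 */
static struct pipe_context *
mydrv_context_create(struct pipe_screen *screen, void *priv, unsigned flags)
{
   struct mydrv_context *mctx = CALLOC_STRUCT(mydrv_context);

   if (!mctx)
      return NULL;

   /* ... fill in transfer_map, transfer_unmap and the rest of the vtable ... */
   mctx->base.buffer_subdata = u_default_buffer_subdata;

   return &mctx->base;
}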
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
                                       struct pipe_transfer *transfer)
{
   struct r600_common_context *rctx = (struct r600_common_context*)ctx;
   struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
   struct r600_resource *rbuffer = r600_resource(transfer->resource);

   if (rtransfer->staging) {
      if (rtransfer->transfer.usage & PIPE_TRANSFER_WRITE) {
         struct pipe_resource *dst, *src;
         unsigned soffset, doffset, size;
         struct pipe_box box;

         dst = transfer->resource;
         src = &rtransfer->staging->b.b;
         size = transfer->box.width;
         doffset = transfer->box.x;
         soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;

         u_box_1d(soffset, size, &box);

         /* Copy the staging buffer into the original one. */
         rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
      }
      pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
                     transfer->box.x + transfer->box.width);
   }

   util_slab_free(&rctx->pool_transfers, transfer);
}
static void r600_buffer_flush_region(struct pipe_context *ctx,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *rel_box)
{
   if (transfer->usage & (PIPE_TRANSFER_WRITE |
                          PIPE_TRANSFER_FLUSH_EXPLICIT)) {
      struct pipe_box box;

      u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
      r600_buffer_do_flush_region(ctx, transfer, &box);
   }
}
void
NineBuffer9_SetDirty( struct NineBuffer9 *This )
{
    assert(This->base.pool == D3DPOOL_MANAGED);

    if (!This->managed.dirty) {
        assert(LIST_IS_EMPTY(&This->managed.list));
        list_add(&This->managed.list, &This->base.base.device->update_buffers);
        This->managed.dirty = TRUE;
    }

    u_box_1d(0, This->size, &This->managed.dirty_box);
}
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
                                       struct pipe_transfer *transfer)
{
   struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

   if (rtransfer->staging) {
      struct pipe_box box;
      u_box_1d(0, transfer->box.width, &box);

      /* Copy the staging buffer into the original one. */
      r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
                       &rtransfer->staging->b.b, &box);
      pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
   }
}
void util_blitter_copy_buffer(struct blitter_context *blitter,
                              struct pipe_resource *dst,
                              unsigned dstx,
                              struct pipe_resource *src,
                              unsigned srcx,
                              unsigned size)
{
   struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter;
   struct pipe_context *pipe = ctx->base.pipe;
   struct pipe_vertex_buffer vb;
   struct pipe_stream_output_target *so_target;

   /* Drivers not capable of Stream Out should not call this function
    * in the first place. */
   assert(ctx->has_stream_out);

   /* Some alignment is required. */
   if (srcx % 4 != 0 || dstx % 4 != 0 || size % 16 != 0 ||
       !ctx->has_stream_out) {
      struct pipe_box box;
      u_box_1d(srcx, size, &box);
      util_resource_copy_region(pipe, dst, 0, dstx, 0, 0, src, 0, &box);
      return;
   }

   blitter_set_running_flag(ctx);
   blitter_check_saved_vertex_states(ctx);

   vb.buffer = src;
   vb.buffer_offset = srcx;
   vb.stride = 4;

   pipe->set_vertex_buffers(pipe, 1, &vb);
   pipe->bind_vertex_elements_state(pipe, ctx->velem_state_readbuf);
   pipe->bind_vs_state(pipe, ctx->vs_pos_only);
   if (ctx->has_geometry_shader)
      pipe->bind_gs_state(pipe, NULL);
   pipe->bind_rasterizer_state(pipe, ctx->rs_discard_state);

   so_target = pipe->create_stream_output_target(pipe, dst, dstx, size);
   pipe->set_stream_output_targets(pipe, 1, &so_target, 0);

   util_draw_arrays(pipe, PIPE_PRIM_POINTS, 0, size / 16);

   blitter_restore_vertex_states(ctx);
   blitter_unset_running_flag(ctx);
   pipe_so_target_reference(&so_target, NULL);
}
static bool virgl_end_query(struct pipe_context *ctx,
                            struct pipe_query *q)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_query *query = virgl_query(q);
   struct pipe_box box;
   uint32_t qs = VIRGL_QUERY_STATE_WAIT_HOST;

   u_box_1d(0, 4, &box);
   virgl_transfer_inline_write(ctx, &query->buf->u.b, 0, PIPE_TRANSFER_WRITE,
                               &box, &qs, 0, 0);

   virgl_encoder_end_query(vctx, query->handle);
   return true;
}
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
                                       struct pipe_transfer *transfer)
{
   struct r600_context *rctx = (struct r600_context*)pipe;
   struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

   if (rtransfer->staging) {
      struct pipe_box box;
      u_box_1d(transfer->box.x % R600_MAP_BUFFER_ALIGNMENT,
               transfer->box.width, &box);

      /* Copy the staging buffer into the original one. */
      r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
                       &rtransfer->staging->b.b, &box);
      pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
   }

   util_slab_free(&rctx->pool_transfers, transfer);
}
/**
 * Global buffers are not really resources, they are actually offsets
 * into a single global resource (r600_screen::global_pool). This means
 * they don't have their own cs_buf handle, so they cannot be passed
 * to r600_copy_buffer() and must be handled separately.
 *
 * XXX: It should be possible to implement this function using
 * r600_copy_buffer() by passing the memory_pool resource as both src
 * and dst and updating dstx and src_box to point to the correct offsets.
 * This would likely perform better than the current implementation.
 */
static void r600_copy_global_buffer(struct pipe_context *ctx,
                                    struct pipe_resource *dst, unsigned dstx,
                                    struct pipe_resource *src,
                                    const struct pipe_box *src_box)
{
   struct pipe_box dst_box;
   struct pipe_transfer *src_pxfer, *dst_pxfer;

   u_box_1d(dstx, src_box->width, &dst_box);
   void *src_ptr = ctx->transfer_map(ctx, src, 0, PIPE_TRANSFER_READ,
                                     src_box, &src_pxfer);
   void *dst_ptr = ctx->transfer_map(ctx, dst, 0, PIPE_TRANSFER_WRITE,
                                     &dst_box, &dst_pxfer);

   memcpy(dst_ptr, src_ptr, src_box->width);

   ctx->transfer_unmap(ctx, src_pxfer);
   ctx->transfer_unmap(ctx, dst_pxfer);
}
void r600_buffer_subdata(struct pipe_context *ctx,
                         struct pipe_resource *buffer,
                         unsigned usage, unsigned offset,
                         unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   u_box_1d(offset, size, &box);
   map = r600_buffer_transfer_map(ctx, buffer, 0,
                                  PIPE_TRANSFER_WRITE |
                                  PIPE_TRANSFER_DISCARD_RANGE |
                                  usage,
                                  &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   r600_buffer_transfer_unmap(ctx, transfer);
}
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
                                       struct pipe_transfer *transfer)
{
   struct r600_context *rctx = (struct r600_context*)pipe;
   struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
   struct r600_resource *rbuffer = r600_resource(transfer->resource);

   if (rtransfer->staging) {
      struct pipe_resource *dst, *src;
      unsigned soffset, doffset, size;

      dst = transfer->resource;
      src = &rtransfer->staging->b.b;
      size = transfer->box.width;
      doffset = transfer->box.x;
      soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;

      /* Copy the staging buffer into the original one. */
      if (rctx->b.rings.dma.cs && !(size % 4) && !(doffset % 4) && !(soffset % 4)) {
         if (rctx->screen->b.chip_class >= EVERGREEN) {
            evergreen_dma_copy(rctx, dst, src, doffset, soffset, size);
         } else {
            r600_dma_copy(rctx, dst, src, doffset, soffset, size);
         }
      } else {
         struct pipe_box box;
         u_box_1d(soffset, size, &box);
         r600_copy_buffer(pipe, dst, doffset, src, &box);
      }
      pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
                     transfer->box.x + transfer->box.width);
   }

   util_slab_free(&rctx->pool_transfers, transfer);
}
/**
 * Called via glCopyBufferSubData().
 */
static void
st_copy_buffer_subdata(struct gl_context *ctx,
                       struct gl_buffer_object *src,
                       struct gl_buffer_object *dst,
                       GLintptr readOffset, GLintptr writeOffset,
                       GLsizeiptr size)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *srcObj = st_buffer_object(src);
   struct st_buffer_object *dstObj = st_buffer_object(dst);
   struct pipe_box box;

   if (!size)
      return;

   /* buffer should not already be mapped */
   assert(!src->Pointer);
   assert(!dst->Pointer);

   u_box_1d(readOffset, size, &box);

   pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
                              srcObj->buffer, 0, &box);
}
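/*
 * Illustrative application-side usage (added for context; the buffer names
 * and helper below are hypothetical): glCopyBufferSubData() is the GL entry
 * point that funnels into st_copy_buffer_subdata() above, copying between
 * two buffer objects without mapping either of them. Assumes a GL 3.1 /
 * ARB_copy_buffer context and headers.
 */
static void
copy_between_buffers(GLuint src_bo, GLuint dst_bo, GLsizeiptr size)
{
   glBindBuffer(GL_COPY_READ_BUFFER, src_bo);
   glBindBuffer(GL_COPY_WRITE_BUFFER, dst_bo);
   glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
                       0 /* readOffset */, 0 /* writeOffset */, size);
}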
HRESULT NINE_WINAPI
NineBuffer9_Lock( struct NineBuffer9 *This,
                  UINT OffsetToLock,
                  UINT SizeToLock,
                  void **ppbData,
                  DWORD Flags )
{
    struct pipe_box box;
    void *data;
    unsigned usage = d3dlock_buffer_to_pipe_transfer_usage(Flags);

    DBG("This=%p(pipe=%p) OffsetToLock=0x%x, SizeToLock=0x%x, Flags=0x%x\n",
        This, This->base.resource,
        OffsetToLock, SizeToLock, Flags);

    user_assert(ppbData, E_POINTER);
    user_assert(!(Flags & ~(D3DLOCK_DISCARD |
                            D3DLOCK_DONOTWAIT |
                            D3DLOCK_NO_DIRTY_UPDATE |
                            D3DLOCK_NOSYSLOCK |
                            D3DLOCK_READONLY |
                            D3DLOCK_NOOVERWRITE)), D3DERR_INVALIDCALL);

    if (SizeToLock == 0) {
        SizeToLock = This->size - OffsetToLock;
        user_warn(OffsetToLock != 0);
    }

    u_box_1d(OffsetToLock, SizeToLock, &box);

    if (This->base.pool == D3DPOOL_MANAGED) {
        if (!This->managed.dirty) {
            assert(LIST_IS_EMPTY(&This->managed.list));
            list_add(&This->managed.list, &This->base.base.device->update_buffers);
            This->managed.dirty = TRUE;
            This->managed.dirty_box = box;
        } else {
            u_box_union_2d(&This->managed.dirty_box, &This->managed.dirty_box, &box);
        }
        *ppbData = (char *)This->managed.data + OffsetToLock;
        DBG("returning pointer %p\n", *ppbData);
        This->nmaps++;
        return D3D_OK;
    }

    if (This->nmaps == This->maxmaps) {
        struct pipe_transfer **newmaps =
            REALLOC(This->maps, sizeof(struct pipe_transfer *)*This->maxmaps,
                    sizeof(struct pipe_transfer *)*(This->maxmaps << 1));
        if (newmaps == NULL)
            return E_OUTOFMEMORY;
        This->maxmaps <<= 1;
        This->maps = newmaps;
    }

    data = This->pipe->transfer_map(This->pipe, This->base.resource, 0,
                                    usage, &box, &This->maps[This->nmaps]);
    if (!data) {
        DBG("pipe::transfer_map failed\n"
            " usage = %x\n"
            " box.x = %u\n"
            " box.width = %u\n",
            usage, box.x, box.width);
        /* not sure what to return, msdn suggests this */
        if (Flags & D3DLOCK_DONOTWAIT)
            return D3DERR_WASSTILLDRAWING;
        return D3DERR_INVALIDCALL;
    }

    DBG("returning pointer %p\n", data);
    This->nmaps++;
    *ppbData = data;

    return D3D_OK;
}
/**
 * Allocate space for and store data in a buffer object. Any data that was
 * previously stored in the buffer object is lost. If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const GLvoid * data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   if (size && data &&
       st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;

      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_READ:
      case GL_STATIC_COPY:
      default:
         pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_READ:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_READ:
      case GL_STREAM_COPY:
         pipe_usage = PIPE_USAGE_STREAM;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %td bind 0x%x\n", size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      st_obj->buffer = pipe->screen->resource_create(pipe->screen, &buffer);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well. Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(struct pipe_context *ctx_,
                                    const uint *block_layout,
                                    const uint *grid_layout,
                                    const void *input)
{
   struct r600_context *ctx = (struct r600_context *)ctx_;
   struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
   unsigned i;
   /* We need to reserve 9 dwords (36 bytes) for implicit kernel
    * parameters.
    */
   unsigned input_size = shader->input_size + 36;
   uint32_t *num_work_groups_start;
   uint32_t *global_size_start;
   uint32_t *local_size_start;
   uint32_t *kernel_parameters_start;
   struct pipe_box box;
   struct pipe_transfer *transfer = NULL;

   if (shader->input_size == 0) {
      return;
   }

   if (!shader->kernel_param) {
      /* Add space for the grid dimensions */
      shader->kernel_param = (struct r600_resource *)
         pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
                            PIPE_USAGE_IMMUTABLE, input_size);
   }

   u_box_1d(0, input_size, &box);
   num_work_groups_start = ctx_->transfer_map(ctx_,
         (struct pipe_resource*)shader->kernel_param,
         0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
         &box, &transfer);
   global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
   local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
   kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);

   /* Copy the work group size */
   memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

   /* Copy the global size */
   for (i = 0; i < 3; i++) {
      global_size_start[i] = grid_layout[i] * block_layout[i];
   }

   /* Copy the local dimensions */
   memcpy(local_size_start, block_layout, 3 * sizeof(uint));

   /* Copy the kernel inputs */
   memcpy(kernel_parameters_start, input, shader->input_size);

   for (i = 0; i < (input_size / 4); i++) {
      COMPUTE_DBG(ctx->screen, "input %i : %u\n", i,
                  ((unsigned*)num_work_groups_start)[i]);
   }

   ctx_->transfer_unmap(ctx_, transfer);

   /* ID=0 is reserved for the parameters */
   evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
                                    (struct pipe_resource*)shader->kernel_param);
}
HRESULT
NineBuffer9_ctor( struct NineBuffer9 *This,
                  struct NineUnknownParams *pParams,
                  D3DRESOURCETYPE Type,
                  DWORD Usage,
                  UINT Size,
                  D3DPOOL Pool )
{
    struct pipe_resource *info = &This->base.info;
    HRESULT hr;

    DBG("This=%p Size=0x%x Usage=%x Pool=%u\n", This, Size, Usage, Pool);

    user_assert(Pool != D3DPOOL_SCRATCH, D3DERR_INVALIDCALL);

    This->maps = MALLOC(sizeof(struct pipe_transfer *));
    if (!This->maps)
        return E_OUTOFMEMORY;
    This->nmaps = 0;
    This->maxmaps = 1;
    This->size = Size;

    This->pipe = pParams->device->pipe;

    info->screen = pParams->device->screen;
    info->target = PIPE_BUFFER;
    info->format = PIPE_FORMAT_R8_UNORM;
    info->width0 = Size;
    info->flags = 0;

    info->bind = PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_TRANSFER_WRITE;
    if (!(Usage & D3DUSAGE_WRITEONLY))
        info->bind |= PIPE_BIND_TRANSFER_READ;

    info->usage = PIPE_USAGE_DEFAULT;
    if (Usage & D3DUSAGE_DYNAMIC)
        info->usage = PIPE_USAGE_STREAM;
    else if (Pool == D3DPOOL_SYSTEMMEM)
        info->usage = PIPE_USAGE_STAGING;

    /* if (pDesc->Usage & D3DUSAGE_DONOTCLIP) { } */
    /* if (pDesc->Usage & D3DUSAGE_NONSECURE) { } */
    /* if (pDesc->Usage & D3DUSAGE_NPATCHES) { } */
    /* if (pDesc->Usage & D3DUSAGE_POINTS) { } */
    /* if (pDesc->Usage & D3DUSAGE_RTPATCHES) { } */
    if (Usage & D3DUSAGE_SOFTWAREPROCESSING)
        DBG("Application asked for Software Vertex Processing, "
            "but this is unimplemented\n");
    /* if (pDesc->Usage & D3DUSAGE_TEXTAPI) { } */

    info->height0 = 1;
    info->depth0 = 1;
    info->array_size = 1;
    info->last_level = 0;
    info->nr_samples = 0;

    hr = NineResource9_ctor(&This->base, pParams, NULL, TRUE,
                            Type, Pool, Usage);
    if (FAILED(hr))
        return hr;

    if (Pool == D3DPOOL_MANAGED) {
        This->managed.data = align_malloc(
            nine_format_get_level_alloc_size(This->base.info.format,
                                             Size, 1, 0), 32);
        if (!This->managed.data)
            return E_OUTOFMEMORY;
        memset(This->managed.data, 0, Size);
        This->managed.dirty = TRUE;
        u_box_1d(0, Size, &This->managed.dirty_box);
        list_inithead(&This->managed.list);
        list_inithead(&This->managed.list2);
        list_add(&This->managed.list, &pParams->device->update_buffers);
        list_add(&This->managed.list2, &pParams->device->managed_buffers);
    }

    return D3D_OK;
}
/**
 * Allocate space for and store data in a buffer object. Any data that was
 * previously stored in the buffer object is lost. If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const GLvoid * data,
                  GLenum usage,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage;

   if (size && data &&
       st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;

      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   default:
      bind = 0;
   }

   switch (usage) {
   case GL_STATIC_DRAW:
   case GL_STATIC_READ:
   case GL_STATIC_COPY:
   default:
      pipe_usage = PIPE_USAGE_DEFAULT;
      break;
   case GL_DYNAMIC_DRAW:
   case GL_DYNAMIC_READ:
   case GL_DYNAMIC_COPY:
      pipe_usage = PIPE_USAGE_DYNAMIC;
      break;
   case GL_STREAM_DRAW:
   case GL_STREAM_READ:
   case GL_STREAM_COPY:
      pipe_usage = PIPE_USAGE_STREAM;
      break;
   }

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %td bind 0x%x\n", size, bind);
   }

   if (size != 0) {
      st_obj->buffer = pipe_buffer_create(pipe->screen, bind,
                                          pipe_usage, size);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
/**
 * Allocate space for and store data in a buffer object. Any data that was
 * previously stored in the buffer object is lost. If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const GLvoid * data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         struct pipe_box box;

         u_box_1d(0, size, &box);
         pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                     PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                     &box, data, 0, 0);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
   case GL_PARAMETER_BUFFER_ARB:
      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
      break;
   case GL_ATOMIC_COUNTER_BUFFER:
   case GL_SHADER_STORAGE_BUFFER:
      bind = PIPE_BIND_SHADER_BUFFER;
      break;
   case GL_QUERY_BUFFER:
      bind = PIPE_BIND_QUERY_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
         pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         /* XXX: Remove this test and fall-through when we have PBO unpacking
          * acceleration. Right now, PBO unpacking is done by the CPU, so we
          * have to make sure CPU reads are fast.
          */
         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
            pipe_usage = PIPE_USAGE_STREAM;
            break;
         }
         /* fall through */
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         pipe_usage = PIPE_USAGE_STAGING;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}