/**
 * Translate Mesa's index buffer information into a pipe_index_buffer
 * and bind it via the CSO context.
 *
 * Three sources are handled: a real VBO, user memory uploaded through the
 * index-buffer uploader, or user memory referenced in place.
 */
static void
setup_index_buffer(struct st_context *st,
                   const struct _mesa_index_buffer *ib,
                   struct pipe_index_buffer *ibuffer)
{
   struct gl_buffer_object *bufobj = ib->obj;

   ibuffer->index_size = vbo_sizeof_ib_type(ib->type);

   /* get/create the index buffer object */
   if (_mesa_is_bufferobj(bufobj)) {
      /* indices are in a real VBO; ib->ptr is an offset, not a pointer */
      ibuffer->buffer = st_buffer_object(bufobj)->buffer;
      ibuffer->offset = pointer_to_offset(ib->ptr);
   }
   else if (st->indexbuf_uploader) {
      /* upload the user-space indices into a real buffer */
      /* NOTE(review): the u_upload_data() return value is ignored here, so
       * an out-of-memory upload leaves ibuffer->buffer/offset unset before
       * cso_set_index_buffer() is called.  Confirm whether failure should
       * be propagated (a boolean-returning variant of this function exists
       * that does exactly that).
       */
      u_upload_data(st->indexbuf_uploader, 0,
                    ib->count * ibuffer->index_size, ib->ptr,
                    &ibuffer->offset, &ibuffer->buffer);
      u_upload_unmap(st->indexbuf_uploader);
   }
   else {
      /* indices are in user space memory; reference them in place */
      ibuffer->user_buffer = ib->ptr;
   }

   cso_set_index_buffer(st->cso_context, ibuffer);
}
/**
 * Map the whole buffer object into CPU-accessible memory.
 * Called via glMapBufferARB().
 *
 * On success obj->Pointer/Offset/Length describe the mapping; the pointer
 * (possibly NULL on failure) is returned to the caller.
 */
static void *
st_bufferobj_map(GLcontext *ctx, GLenum target, GLenum access,
                 struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);
   GLuint flags;

   /* Translate the GL access enum into pipe CPU-usage flags. */
   if (access == GL_WRITE_ONLY) {
      flags = PIPE_BUFFER_USAGE_CPU_WRITE;
   }
   else if (access == GL_READ_ONLY) {
      flags = PIPE_BUFFER_USAGE_CPU_READ;
   }
   else {
      /* GL_READ_WRITE and any unexpected value: map for both directions */
      flags = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
   }

   obj->Pointer = st_cond_flush_pipe_buffer_map(st_context(ctx),
                                                stobj->buffer, flags);
   if (obj->Pointer) {
      /* Whole-buffer mapping: window covers the entire store. */
      obj->Offset = 0;
      obj->Length = obj->Size;
   }
   return obj->Pointer;
}
/** * Basically, translate Mesa's index buffer information into * a pipe_index_buffer object. * \return TRUE or FALSE for success/failure */ static boolean setup_index_buffer(struct st_context *st, const struct _mesa_index_buffer *ib, struct pipe_index_buffer *ibuffer) { struct gl_buffer_object *bufobj = ib->obj; ibuffer->index_size = vbo_sizeof_ib_type(ib->type); /* get/create the index buffer object */ if (_mesa_is_bufferobj(bufobj)) { /* indices are in a real VBO */ ibuffer->buffer = st_buffer_object(bufobj)->buffer; ibuffer->offset = pointer_to_offset(ib->ptr); } else if (st->indexbuf_uploader) { /* upload indexes from user memory into a real buffer */ if (u_upload_data(st->indexbuf_uploader, 0, ib->count * ibuffer->index_size, ib->ptr, &ibuffer->offset, &ibuffer->buffer) != PIPE_OK) { /* out of memory */ return FALSE; } u_upload_unmap(st->indexbuf_uploader); } else { /* indices are in user space memory */ ibuffer->user_buffer = ib->ptr; } cso_set_index_buffer(st->cso_context, ibuffer); return TRUE; }
/**
 * Map a sub-range of a buffer object.
 * Called via glMapBufferRange().
 *
 * Translates the GL access bitfield into gallium transfer flags, performs
 * the map, and records the mapped window in obj.  Returns the pointer to
 * the start of the requested range (NULL on failure).
 */
static void *
st_bufferobj_map_range(struct gl_context *ctx,
                       GLintptr offset, GLsizeiptr length, GLbitfield access,
                       struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *stobj = st_buffer_object(obj);
   enum pipe_transfer_usage flags = 0x0;

   if (access & GL_MAP_WRITE_BIT)
      flags |= PIPE_TRANSFER_WRITE;
   if (access & GL_MAP_READ_BIT)
      flags |= PIPE_TRANSFER_READ;
   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      flags |= PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
      flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }
   else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
      /* Invalidating the full extent is equivalent to invalidating the
       * whole resource, which drivers can handle more efficiently.
       */
      if (offset == 0 && length == obj->Size)
         flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
      else
         flags |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      flags |= PIPE_TRANSFER_UNSYNCHRONIZED;

   /* ... other flags ... */

   if (access & MESA_MAP_NOWAIT_BIT)
      flags |= PIPE_TRANSFER_DONTBLOCK;

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   obj->Pointer = pipe_buffer_map_range(pipe, stobj->buffer,
                                        offset, length, flags,
                                        &stobj->transfer);
   if (obj->Pointer) {
      /* The map points at the start of the buffer; expose a pointer to the
       * start of the requested range and record the mapped window.
       */
      obj->Pointer = (ubyte *) obj->Pointer + offset;
      obj->Offset = offset;
      obj->Length = length;
      obj->AccessFlags = access;
   }

   return obj->Pointer;
}
/**
 * Launch a compute dispatch whose grid size comes from a buffer object.
 * Called for glDispatchComputeIndirect().
 */
static void
st_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect_offset)
{
   /* The indirect parameters live in the currently bound
    * GL_DISPATCH_INDIRECT_BUFFER at the given byte offset.
    */
   struct pipe_resource *indirect =
      st_buffer_object(ctx->DispatchIndirectBuffer)->buffer;

   st_dispatch_compute_common(ctx, NULL, NULL, indirect, indirect_offset);
}
/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st, GLenum gl_target,
                            bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = st_buffer_object(store->BufferObj)->buffer;
   /* With a bound PBO, "pixels" is interpreted as a byte offset. */
   intptr_t buf_offset = (intptr_t) pixels;

   /* The byte offset must be texel-aligned for this path. */
   if (buf_offset % addr->bytes_per_pixel)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   }
   else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

   /* Compute the stride, taking store->Alignment into account */
   {
      unsigned pixels_per_row = store->RowLength > 0 ? store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      /* Round the row stride up to the GL pack/unpack alignment. */
      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      /* The padded stride must still be a whole number of texels. */
      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      /* Apply SKIP_ROWS/SKIP_IMAGES/SKIP_PIXELS, all in texel units. */
      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA: start at the last row and walk the
    * rows backwards by negating the stride.
    */
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}
/**
 * Translate the currently bound VAO's enabled vertex arrays into gallium
 * pipe_vertex_buffer and pipe_vertex_element records for the vertex
 * program variant's referenced inputs.
 *
 * One pipe_vertex_buffer is emitted per gl_vertex_buffer_binding; all
 * attributes sharing a binding reference the same buffer index.
 * \param num_vbuffers  in/out running count of vertex buffers filled in
 */
void
st_setup_arrays(struct st_context *st,
                const struct st_vertex_program *vp,
                const struct st_vp_variant *vp_variant,
                struct pipe_vertex_element *velements,
                struct pipe_vertex_buffer *vbuffer, unsigned *num_vbuffers)
{
   struct gl_context *ctx = st->ctx;
   const struct gl_vertex_array_object *vao = ctx->Array._DrawVAO;
   const GLbitfield inputs_read = vp_variant->vert_attrib_mask;
   const ubyte *input_to_index = vp->input_to_index;

   /* Process attribute array data. */
   GLbitfield mask = inputs_read & _mesa_draw_array_bits(ctx);
   while (mask) {
      /* The attribute index to start pulling a binding */
      const gl_vert_attrib i = ffs(mask) - 1;
      const struct gl_vertex_buffer_binding *const binding =
         _mesa_draw_buffer_binding(vao, i);
      const unsigned bufidx = (*num_vbuffers)++;

      if (_mesa_is_bufferobj(binding->BufferObj)) {
         /* Set the binding: real VBO, offset comes from the binding. */
         struct st_buffer_object *stobj =
            st_buffer_object(binding->BufferObj);
         vbuffer[bufidx].buffer.resource = stobj ? stobj->buffer : NULL;
         vbuffer[bufidx].is_user_buffer = false;
         vbuffer[bufidx].buffer_offset = _mesa_draw_binding_offset(binding);
      } else {
         /* Set the binding: user-space array, "offset" is the pointer. */
         const void *ptr = (const void *)_mesa_draw_binding_offset(binding);
         vbuffer[bufidx].buffer.user = ptr;
         vbuffer[bufidx].is_user_buffer = true;
         vbuffer[bufidx].buffer_offset = 0;

         /* NOTE(review): flag presumably requests a min/max index scan for
          * non-instanced user arrays later in the draw path -- only the
          * flag assignment is visible here; confirm at the consumer.
          */
         if (!binding->InstanceDivisor)
            st->draw_needs_minmax_index = true;
      }
      vbuffer[bufidx].stride = binding->Stride; /* in bytes */

      const GLbitfield boundmask = _mesa_draw_bound_attrib_bits(binding);
      GLbitfield attrmask = mask & boundmask;
      /* Mark the those attributes as processed */
      mask &= ~boundmask;
      /* We can assume that we have array for the binding */
      assert(attrmask);

      /* Walk attributes belonging to the binding */
      while (attrmask) {
         const gl_vert_attrib attr = u_bit_scan(&attrmask);
         const struct gl_array_attributes *const attrib =
            _mesa_draw_array_attrib(vao, attr);
         const GLuint off = _mesa_draw_attributes_relative_offset(attrib);
         init_velement_lowered(vp, velements, &attrib->Format, off,
                               binding->InstanceDivisor, bufidx,
                               input_to_index[attr]);
      }
   }
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(GLcontext *ctx, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* Release our reference on the underlying pipe buffer, if any,
    * then free the wrapper object itself.
    */
   if (stobj->buffer)
      pipe_buffer_reference(&stobj->buffer, NULL);

   _mesa_free(stobj);
}
/**
 * Unmap a previously mapped buffer object.
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(GLcontext *ctx, GLenum target, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);
   struct pipe_context *pipe = st_context(ctx)->pipe;

   pipe_buffer_unmap(pipe->screen, stobj->buffer);

   /* Reset the mapping bookkeeping on the GL-side object. */
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;
   return GL_TRUE;
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* Callers must have dropped every reference and unmapped the buffer. */
   assert(obj->RefCount == 0);
   assert(stobj->transfer == NULL);

   if (stobj->buffer)
      pipe_resource_reference(&stobj->buffer, NULL);

   free(stobj);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   assert(obj->RefCount == 0);

   /* Tear down any live mappings before releasing storage. */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   /* Drop the pipe resource, then let core Mesa destroy the object. */
   if (stobj->buffer)
      pipe_resource_reference(&stobj->buffer, NULL);

   _mesa_delete_buffer_object(ctx, obj);
}
/**
 * Bind the program's shader storage buffers (SSBOs) to the given shader
 * stage, then clear any remaining stale SSBO slots.
 *
 * SSBO slots start after the atomic-buffer slots, hence the
 * c->MaxAtomicBuffers start index in both set_shader_buffers() calls.
 */
static void
st_bind_ssbos(struct st_context *st, struct gl_program *prog,
              enum pipe_shader_type shader_type)
{
   unsigned i;
   struct pipe_shader_buffer buffers[MAX_SHADER_STORAGE_BUFFERS];
   struct gl_program_constants *c;

   /* Nothing to do without a program or without driver support. */
   if (!prog || !st->pipe->set_shader_buffers)
      return;

   c = &st->ctx->Const.Program[prog->info.stage];

   for (i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding;
      struct st_buffer_object *st_obj;
      struct pipe_shader_buffer *sb = &buffers[i];

      /* Look up the GL binding point this SSBO block refers to. */
      binding = &st->ctx->ShaderStorageBufferBindings[
            prog->sh.ShaderStorageBlocks[i]->Binding];
      st_obj = st_buffer_object(binding->BufferObject);

      sb->buffer = st_obj->buffer;

      if (sb->buffer) {
         sb->buffer_offset = binding->Offset;
         sb->buffer_size = sb->buffer->width0 - binding->Offset;

         /* AutomaticSize is FALSE if the buffer was set with BindBufferRange.
          * Take the minimum just to be sure.
          */
         if (!binding->AutomaticSize)
            sb->buffer_size = MIN2(sb->buffer_size, (unsigned) binding->Size);
      }
      else {
         /* No buffer bound at this point: emit an empty slot. */
         sb->buffer_offset = 0;
         sb->buffer_size = 0;
      }
   }

   st->pipe->set_shader_buffers(st->pipe, shader_type, c->MaxAtomicBuffers,
                                prog->info.num_ssbos, buffers);
   /* clear out any stale shader buffers */
   if (prog->info.num_ssbos < c->MaxShaderStorageBlocks)
      st->pipe->set_shader_buffers(
            st->pipe, shader_type,
            c->MaxAtomicBuffers + prog->info.num_ssbos,
            c->MaxShaderStorageBlocks - prog->info.num_ssbos,
            NULL);
}
/**
 * Copy a byte range between two buffer objects on the GPU.
 * Called via glCopyBufferSubData().
 */
static void
st_copy_buffer_subdata(struct gl_context *ctx,
                       struct gl_buffer_object *src,
                       struct gl_buffer_object *dst,
                       GLintptr readOffset, GLintptr writeOffset,
                       GLsizeiptr size)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct pipe_box box;

   if (!size)
      return;

   /* buffer should not already be mapped */
   assert(!src->Pointer);
   assert(!dst->Pointer);

   /* Describe the source range as a 1-D box and let the driver copy. */
   u_box_1d(readOffset, size, &box);

   pipe->resource_copy_region(pipe,
                              st_buffer_object(dst)->buffer, 0,
                              writeOffset, 0, 0,
                              st_buffer_object(src)->buffer, 0, &box);
}
/**
 * Replace data in a subrange of buffer object. If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(GLcontext *ctx, GLenum target,
                     GLintptrARB offset, GLsizeiptrARB size,
                     const GLvoid *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* Silently reject ranges extending past the end of the store. */
   if (offset >= stobj->size || size > (stobj->size - offset))
      return;

   st_cond_flush_pipe_buffer_write(st_context(ctx), stobj->buffer,
                                   offset, size, data);
}
/** * Called via glUnmapBufferARB(). */ static GLboolean st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj) { struct pipe_context *pipe = st_context(ctx)->pipe; struct st_buffer_object *st_obj = st_buffer_object(obj); if (obj->Length) pipe_buffer_unmap(pipe, st_obj->transfer); st_obj->transfer = NULL; obj->Pointer = NULL; obj->Offset = 0; obj->Length = 0; return GL_TRUE; }
/**
 * Unmap one of the buffer object's mapping ranges.
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                   gl_map_buffer_index index)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* Only tear down the transfer if this mapping range is active. */
   if (obj->Mappings[index].Length) {
      struct pipe_context *pipe = st_context(ctx)->pipe;
      pipe_buffer_unmap(pipe, stobj->transfer[index]);
   }

   stobj->transfer[index] = NULL;
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   return GL_TRUE;
}
/**
 * Flush a sub-range of an explicitly-flushed buffer mapping.
 * The offset/length arguments are relative to the mapped range.
 */
static void
st_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
                                GLintptr offset, GLsizeiptr length,
                                struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);
   struct pipe_context *pipe = st_context(ctx)->pipe;

   /* Subrange is relative to mapped range */
   assert(offset >= 0);
   assert(length >= 0);
   assert(offset + length <= obj->Length);

   pipe_buffer_flush_mapped_range(pipe->screen, stobj->buffer,
                                  obj->Offset + offset, length);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via glBufferDataARB().
 */
static void
st_bufferobj_data(GLcontext *ctx, GLenum target, GLsizeiptrARB size,
                  const GLvoid *data, GLenum usage,
                  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned buffer_usage;

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;

   /* Map the GL binding point to a pipe buffer usage hint. */
   if (target == GL_PIXEL_PACK_BUFFER_ARB ||
       target == GL_PIXEL_UNPACK_BUFFER_ARB)
      buffer_usage = PIPE_BUFFER_USAGE_PIXEL;
   else if (target == GL_ARRAY_BUFFER_ARB)
      buffer_usage = PIPE_BUFFER_USAGE_VERTEX;
   else if (target == GL_ELEMENT_ARRAY_BUFFER_ARB)
      buffer_usage = PIPE_BUFFER_USAGE_INDEX;
   else
      buffer_usage = 0;

   /* Release the old storage and allocate a fresh buffer. */
   pipe_buffer_reference(&st_obj->buffer, NULL);
   st_obj->buffer = pipe_buffer_create(pipe->screen, 32, buffer_usage, size);

   if (!st_obj->buffer) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferDataARB");
      return;
   }

   st_obj->size = size;

   if (data)
      st_no_flush_pipe_buffer_write(st, st_obj->buffer, 0, size, data);
}
/* XXX Do we really need the mode? */
/**
 * Begin transform feedback: (re)create the gallium stream-output targets
 * for the bound feedback buffers and bind them, starting at offset 0.
 */
static void
st_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
                            struct gl_transform_feedback_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_transform_feedback_object *sobj =
      st_transform_feedback_object(obj);
   unsigned i, max_num_targets;

   max_num_targets = MIN2(Elements(sobj->base.Buffers),
                          Elements(sobj->targets));

   /* Convert the transform feedback state into the gallium representation. */
   for (i = 0; i < max_num_targets; i++) {
      struct st_buffer_object *bo = st_buffer_object(sobj->base.Buffers[i]);

      if (bo) {
         /* Check whether we need to recreate the target: missing target,
          * or buffer/offset/size no longer match the GL bindings.
          * NOTE(review): "sobj->targets[i] == sobj->draw_count" compares a
          * stream-output target pointer against the draw_count member --
          * confirm that draw_count is itself a target pointer here (and
          * not a resource/counter), otherwise this comparison is wrong.
          */
         if (!sobj->targets[i] ||
             sobj->targets[i] == sobj->draw_count ||
             sobj->targets[i]->buffer != bo->buffer ||
             sobj->targets[i]->buffer_offset != sobj->base.Offset[i] ||
             sobj->targets[i]->buffer_size != sobj->base.Size[i]) {
            /* Create a new target. */
            struct pipe_stream_output_target *so_target =
               pipe->create_stream_output_target(pipe, bo->buffer,
                                                 sobj->base.Offset[i],
                                                 sobj->base.Size[i]);

            pipe_so_target_reference(&sobj->targets[i], NULL);
            sobj->targets[i] = so_target;
         }

         /* num_targets tracks the highest slot with a bound buffer. */
         sobj->num_targets = i+1;
      } else {
         pipe_so_target_reference(&sobj->targets[i], NULL);
      }
   }

   /* Start writing at the beginning of each target. */
   cso_set_stream_outputs(st->cso_context, sobj->num_targets, sobj->targets,
                          0);
}
/**
 * Called via glInvalidateBuffer(Sub)Data.
 *
 * Only whole-buffer invalidation is forwarded to the driver; partial
 * ranges and buffers without backing storage are ignored.
 */
static void
st_bufferobj_invalidate(struct gl_context *ctx,
                        struct gl_buffer_object *obj,
                        GLintptr offset,
                        GLsizeiptr size)
{
   struct st_context *st = st_context(ctx);
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* We ignore partial invalidates, and there is nothing to do when
    * no storage has been allocated.
    */
   if (offset != 0 || size != obj->Size || !stobj->buffer)
      return;

   st->pipe->invalidate_resource(st->pipe, stobj->buffer);
}
/**
 * Flush a sub-range of a mapped buffer (GL_MAP_FLUSH_EXPLICIT_BIT path).
 * The offset/length arguments are relative to the mapped range.
 */
static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                GLintptr offset, GLsizeiptr length,
                                struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* Subrange is relative to mapped range */
   assert(offset >= 0);
   assert(length >= 0);
   assert(offset + length <= obj->Length);
   assert(obj->Pointer);

   /* An empty flush is a no-op. */
   if (!length)
      return;

   pipe_buffer_flush_mapped_range(st_context(ctx)->pipe, stobj->transfer,
                                  obj->Offset + offset, length);
}
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(struct gl_context *ctx,
                     GLintptrARB offset, GLsizeiptrARB size,
                     const GLvoid *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   ASSERT(offset >= 0);
   ASSERT(size >= 0);
   ASSERT(offset + size <= obj->Size);

   /* Nothing to do for an empty range.  Per ARB_vertex_buffer_object a
    * NULL data pointer leaves the store contents undefined, so we keep
    * the old data.  A missing pipe buffer means the earlier allocation
    * failed; bail quietly in that case too.
    */
   if (!size || !data || !stobj->buffer)
      return;

   /* Now that transfers are per-context, we don't have to figure out
    * flushing here.  Usually drivers won't need to flush in this case
    * even if the buffer is currently referenced by hardware - they
    * just queue the upload as dma rather than mapping the underlying
    * buffer directly.
    */
   pipe_buffer_write(st_context(ctx)->pipe, stobj->buffer,
                     offset, size, data);
}
/**
 * Map a sub-range of a buffer object.
 * Called via glMapBufferRange().
 *
 * Returns a pointer to the start of the requested range, or NULL on
 * failure.  obj->Pointer keeps the unadjusted map pointer.
 */
static void *
st_bufferobj_map_range(GLcontext *ctx, GLenum target,
                       GLintptr offset, GLsizeiptr length, GLbitfield access,
                       struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *stobj = st_buffer_object(obj);
   GLuint flags = 0;
   char *map;

   /* Translate the GL access bits into pipe usage flags. */
   if (access & GL_MAP_WRITE_BIT)
      flags |= PIPE_BUFFER_USAGE_CPU_WRITE;
   if (access & GL_MAP_READ_BIT)
      flags |= PIPE_BUFFER_USAGE_CPU_READ;
   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      flags |= PIPE_BUFFER_USAGE_FLUSH_EXPLICIT;

   /* ... other flags ... */

   if (access & MESA_MAP_NOWAIT_BIT)
      flags |= PIPE_BUFFER_USAGE_DONTBLOCK;

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   map = pipe_buffer_map_range(pipe->screen, stobj->buffer,
                               offset, length, flags);
   obj->Pointer = map;
   if (map) {
      obj->Offset = offset;
      obj->Length = length;
      /* Callers expect a pointer to the start of the mapped range. */
      map += offset;
   }

   return map;
}
/**
 * Called via glClearBufferSubData().
 *
 * A NULL clearValue means "fill with zeros".  Falls back to the software
 * path when the driver does not implement clear_buffer.
 */
static void
st_clear_buffer_subdata(struct gl_context *ctx,
                        GLintptr offset, GLsizeiptr size,
                        const GLvoid *clearValue,
                        GLsizeiptr clearValueSize,
                        struct gl_buffer_object *bufObj)
{
   static const char zeros[16] = {0};
   struct pipe_context *pipe = st_context(ctx)->pipe;

   if (!pipe->clear_buffer) {
      _mesa_buffer_clear_subdata(ctx, offset, size,
                                 clearValue, clearValueSize, bufObj);
      return;
   }

   pipe->clear_buffer(pipe, st_buffer_object(bufObj)->buffer,
                      offset, size,
                      clearValue ? clearValue : zeros,
                      clearValueSize);
}
static void st_bind_ubos(struct st_context *st, struct gl_program *prog, unsigned shader_type) { unsigned i; struct pipe_constant_buffer cb = { 0 }; if (!prog) return; for (i = 0; i < prog->info.num_ubos; i++) { struct gl_buffer_binding *binding; struct st_buffer_object *st_obj; binding = &st->ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding]; st_obj = st_buffer_object(binding->BufferObject); cb.buffer = st_obj->buffer; if (cb.buffer) { cb.buffer_offset = binding->Offset; cb.buffer_size = cb.buffer->width0 - binding->Offset; /* AutomaticSize is FALSE if the buffer was set with BindBufferRange. * Take the minimum just to be sure. */ if (!binding->AutomaticSize) cb.buffer_size = MIN2(cb.buffer_size, (unsigned) binding->Size); } else { cb.buffer_offset = 0; cb.buffer_size = 0; } cso_set_constant_buffer(st->cso_context, shader_type, 1 + i, &cb); } }
/**
 * Read back a subrange of a buffer object.
 * Called via glGetBufferSubDataARB().
 */
static void
st_bufferobj_get_subdata(struct gl_context *ctx,
                         GLintptrARB offset, GLsizeiptrARB size,
                         GLvoid *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *stobj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   ASSERT(offset >= 0);
   ASSERT(size >= 0);
   ASSERT(offset + size <= obj->Size);

   /* Empty reads, or a buffer whose allocation previously failed,
    * are silently ignored.
    */
   if (!size || !stobj->buffer)
      return;

   pipe_buffer_read(st_context(ctx)->pipe, stobj->buffer,
                    offset, size, data);
}
/**
 * Bind the shader's uniform buffer objects (UBOs) to the given shader
 * stage.  UBO bindings start at constant-buffer slot 1 (see 1 + i below).
 */
static void
st_bind_ubos(struct st_context *st,
               struct gl_shader *shader,
               unsigned shader_type)
{
   unsigned i;
   struct pipe_constant_buffer cb = { 0 };

   if (!shader)
      return;

   for (i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding;
      struct st_buffer_object *st_obj;

      binding = &st->ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
      st_obj = st_buffer_object(binding->BufferObject);
      pipe_resource_reference(&cb.buffer, st_obj->buffer);

      /* NOTE(review): binding->Offset is subtracted from the size, but
       * cb.buffer_offset is left at 0 (from the {0} initializer), so a
       * BindBufferRange start offset is never applied to reads.  A newer
       * variant of this function sets cb.buffer_offset = binding->Offset;
       * verify whether this one should as well.
       */
      cb.buffer_size = st_obj->buffer->width0 - binding->Offset;

      st->pipe->set_constant_buffer(st->pipe, shader_type, 1 + i, &cb);
      /* Drop the loop-local reference taken above. */
      pipe_resource_reference(&cb.buffer, NULL);
   }
}
/**
 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
 * vertex attribute.
 * \param vbuffer returns vertex buffer info
 * \param velements returns vertex element info
 * \return FALSE on out-of-memory (VBO without backing storage), else TRUE
 */
static boolean
setup_non_interleaved_attribs(struct st_context *st,
                              const struct st_vertex_program *vp,
                              const struct st_vp_variant *vpv,
                              const struct gl_client_array **arrays,
                              struct pipe_vertex_buffer vbuffer[],
                              struct pipe_vertex_element velements[])
{
   struct gl_context *ctx = st->ctx;
   GLuint attr;

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array = arrays[mesaAttr];
      struct gl_buffer_object *bufobj = array->BufferObj;
      GLsizei stride = array->StrideB;

      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      if (_mesa_is_bufferobj(bufobj)) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);
         if (!stobj || !stobj->buffer) {
            return FALSE; /* out-of-memory error probably */
         }

         vbuffer[attr].buffer = stobj->buffer;
         vbuffer[attr].user_buffer = NULL;
         vbuffer[attr].buffer_offset = pointer_to_offset(array->Ptr);
      }
      else {
         /* wrap user data */
         void *ptr;

         if (array->Ptr) {
            ptr = (void *) array->Ptr;
         }
         else {
            /* no array, use ctx->Current.Attrib[] value; stride 0 so the
             * single current value covers every vertex
             */
            ptr = (void *) ctx->Current.Attrib[mesaAttr];
            stride = 0;
         }

         assert(ptr);

         vbuffer[attr].buffer = NULL;
         vbuffer[attr].user_buffer = ptr;
         vbuffer[attr].buffer_offset = 0;
      }

      /* common-case setup */
      vbuffer[attr].stride = stride; /* in bytes */

      /* One buffer per attribute, so the element always starts at 0 of
       * its own buffer slot.
       */
      velements[attr].src_offset = 0;
      velements[attr].instance_divisor = array->InstanceDivisor;
      velements[attr].vertex_buffer_index = attr;
      velements[attr].src_format = st_pipe_vertex_format(array->Type,
                                                         array->Size,
                                                         array->Format,
                                                         array->Normalized,
                                                         array->Integer);
      assert(velements[attr].src_format);
   }
   return TRUE;
}
/**
 * Called by VBO to draw arrays when in selection or feedback mode and
 * to implement glRasterPos.
 * This is very much like the normal draw_vbo() function above.
 * Look at code refactoring some day.
 * Might move this into the failover module some day.
 */
void
st_feedback_draw_vbo(GLcontext *ctx,
                     const struct gl_client_array **arrays,
                     const struct _mesa_prim *prims,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index)
{
   struct st_context *st = ctx->st;
   struct pipe_context *pipe = st->pipe;
   struct draw_context *draw = st->draw;
   const struct st_vertex_program *vp;
   const struct pipe_shader_state *vs;
   struct pipe_buffer *index_buffer_handle = 0;
   struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
   GLuint attr, i;
   ubyte *mapped_constants;

   assert(draw);

   st_validate_state(ctx->st);

   /* Compute the index range ourselves if the caller didn't. */
   if (!index_bounds_valid)
      vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index);

   /* must get these after state validation! */
   vp = ctx->st->vp;
   vs = &st->vp->state;

   /* Lazily create the draw-module variant of the vertex shader. */
   if (!st->vp->draw_shader) {
      st->vp->draw_shader = draw_create_vertex_shader(draw, vs);
   }

   /*
    * Set up the draw module's state.
    *
    * We'd like to do this less frequently, but the normal state-update
    * code sends state updates to the pipe, not to our private draw module.
    */
   assert(draw);
   draw_set_viewport_state(draw, &st->state.viewport);
   draw_set_clip_state(draw, &st->state.clip);
   draw_set_rasterizer_state(draw, &st->state.rasterizer);
   draw_bind_vertex_shader(draw, st->vp->draw_shader);
   set_feedback_vertex_format(ctx);

   /* loop over TGSI shader inputs to determine vertex buffer
    * and attribute info
    */
   for (attr = 0; attr < vp->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj;
      void *map;

      if (bufobj && bufobj->Name) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);
         assert(stobj->buffer);

         vbuffers[attr].buffer = NULL;
         pipe_buffer_reference(&vbuffers[attr].buffer, stobj->buffer);
         /* NOTE(review): the buffer offset comes from arrays[0]->Ptr and
          * the element offset is relative to arrays[0] -- this assumes all
          * VBO arrays are interleaved in a single buffer; confirm this
          * invariant holds for every caller.
          */
         vbuffers[attr].buffer_offset = pointer_to_offset(arrays[0]->Ptr);
         velements[attr].src_offset = arrays[mesaAttr]->Ptr - arrays[0]->Ptr;
      }
      else {
         /* attribute data is in user-space memory, not a VBO */
         uint bytes = (arrays[mesaAttr]->Size
                       * _mesa_sizeof_type(arrays[mesaAttr]->Type)
                       * (max_index + 1));

         /* wrap user data */
         vbuffers[attr].buffer
            = pipe_user_buffer_create(pipe->screen,
                                      (void *) arrays[mesaAttr]->Ptr,
                                      bytes);
         vbuffers[attr].buffer_offset = 0;
         velements[attr].src_offset = 0;
      }

      /* common-case setup */
      vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */
      vbuffers[attr].max_index = max_index;
      velements[attr].vertex_buffer_index = attr;
      velements[attr].nr_components = arrays[mesaAttr]->Size;
      velements[attr].src_format
         = st_pipe_vertex_format(arrays[mesaAttr]->Type,
                                 arrays[mesaAttr]->Size,
                                 arrays[mesaAttr]->Format,
                                 arrays[mesaAttr]->Normalized);
      assert(velements[attr].src_format);

      /* tell draw about this attribute */
#if 0
      draw_set_vertex_buffer(draw, attr, &vbuffer[attr]);
#endif

      /* map the attrib buffer */
      map = pipe_buffer_map(pipe->screen, vbuffers[attr].buffer,
                            PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(draw, attr, map);
   }

   draw_set_vertex_buffers(draw, vp->num_inputs, vbuffers);
   draw_set_vertex_elements(draw, vp->num_inputs, velements);

   /* Provide the index (element) data, if any, to the draw module. */
   if (ib) {
      struct gl_buffer_object *bufobj = ib->obj;
      unsigned indexSize;
      void *map;

      switch (ib->type) {
      case GL_UNSIGNED_INT:
         indexSize = 4;
         break;
      case GL_UNSIGNED_SHORT:
         indexSize = 2;
         break;
      default:
         assert(0);
         return;
      }

      if (bufobj && bufobj->Name) {
         /* Index data lives in a VBO: map it for CPU reads. */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         index_buffer_handle = stobj->buffer;
         map = pipe_buffer_map(pipe->screen, index_buffer_handle,
                               PIPE_BUFFER_USAGE_CPU_READ);

         draw_set_mapped_element_buffer(draw, indexSize, map);
      }
      else {
         /* Index data is already in user memory. */
         draw_set_mapped_element_buffer(draw, indexSize, (void *) ib->ptr);
      }
   }
   else {
      /* no index/element buffer */
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }

   /* map constant buffers */
   mapped_constants = pipe_buffer_map(pipe->screen,
                                      st->state.constants[PIPE_SHADER_VERTEX].buffer,
                                      PIPE_BUFFER_USAGE_CPU_READ);
   draw_set_mapped_constant_buffer(st->draw, mapped_constants,
                                   st->state.constants[PIPE_SHADER_VERTEX].buffer->size);

   /* draw here */
   for (i = 0; i < nr_prims; i++) {
      draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count);
   }

   /* unmap constant buffers */
   pipe_buffer_unmap(pipe->screen,
                     st->state.constants[PIPE_SHADER_VERTEX].buffer);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (draw->pt.vertex_buffer[i].buffer) {
         pipe_buffer_unmap(pipe->screen, draw->pt.vertex_buffer[i].buffer);
         pipe_buffer_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }
   if (index_buffer_handle) {
      pipe_buffer_unmap(pipe->screen, index_buffer_handle);
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }
}
/**
 * Set up for drawing interleaved arrays that all live in one VBO
 * or all live in user space.
 * \param vbuffer returns vertex buffer info
 * \param velements returns vertex element info
 * \return FALSE on out-of-memory (VBO without backing storage), else TRUE
 */
static boolean
setup_interleaved_attribs(const struct st_vertex_program *vp,
                          const struct st_vp_variant *vpv,
                          const struct gl_client_array **arrays,
                          struct pipe_vertex_buffer *vbuffer,
                          struct pipe_vertex_element velements[])
{
   GLuint attr;
   const GLubyte *low_addr = NULL;
   GLboolean usingVBO;      /* all arrays in a VBO? */
   struct gl_buffer_object *bufobj;
   GLsizei stride;

   /* Find the lowest address of the arrays we're drawing,
    * Init bufobj and stride.
    */
   if (vpv->num_inputs) {
      const GLuint mesaAttr0 = vp->index_to_input[0];
      const struct gl_client_array *array = arrays[mesaAttr0];

      /* Since we're doing interleaved arrays, we know there'll be at most
       * one buffer object and the stride will be the same for all arrays.
       * Grab them now.
       */
      bufobj = array->BufferObj;
      stride = array->StrideB;

      low_addr = arrays[vp->index_to_input[0]]->Ptr;

      for (attr = 1; attr < vpv->num_inputs; attr++) {
         const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
         low_addr = MIN2(low_addr, start);
      }
   }
   else {
      /* not sure we'll ever have zero inputs, but play it safe */
      bufobj = NULL;
      stride = 0;
      low_addr = 0;
   }

   /* are the arrays in a VBO (vs. user space)? */
   usingVBO = _mesa_is_bufferobj(bufobj);

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array = arrays[mesaAttr];
      /* Element offsets are relative to the lowest array address, which
       * becomes the single buffer's base offset below.
       */
      unsigned src_offset = (unsigned) (array->Ptr - low_addr);

      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      velements[attr].src_offset = src_offset;
      velements[attr].instance_divisor = array->InstanceDivisor;
      /* All attributes share vertex buffer slot 0 when interleaved. */
      velements[attr].vertex_buffer_index = 0;
      velements[attr].src_format = st_pipe_vertex_format(array->Type,
                                                         array->Size,
                                                         array->Format,
                                                         array->Normalized,
                                                         array->Integer);
      assert(velements[attr].src_format);
   }

   /*
    * Return the vbuffer info and setup user-space attrib info, if needed.
    */
   if (vpv->num_inputs == 0) {
      /* just defensive coding here */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = 0;
   }
   else if (usingVBO) {
      /* all interleaved arrays in a VBO */
      struct st_buffer_object *stobj = st_buffer_object(bufobj);

      if (!stobj || !stobj->buffer) {
         return FALSE; /* out-of-memory error probably */
      }

      vbuffer->buffer = stobj->buffer;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = pointer_to_offset(low_addr);
      vbuffer->stride = stride;
   }
   else {
      /* all interleaved arrays in user memory */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = low_addr;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = stride;
   }
   return TRUE;
}