static int emit_consts( struct svga_context *svga, int offset, int unit ) { struct pipe_transfer *transfer = NULL; unsigned count; const float (*data)[4] = NULL; unsigned i; int ret = PIPE_OK; if (svga->curr.cb[unit] == NULL) goto done; count = svga->curr.cb[unit]->width0 / (4 * sizeof(float)); data = (const float (*)[4])pipe_buffer_map(&svga->pipe, svga->curr.cb[unit], PIPE_TRANSFER_READ, &transfer); if (data == NULL) { ret = PIPE_ERROR_OUT_OF_MEMORY; goto done; } for (i = 0; i < count; i++) { ret = emit_const( svga, unit, offset + i, data[i] ); if (ret) goto done; } done: if (data) pipe_buffer_unmap(&svga->pipe, svga->curr.cb[unit], transfer); return ret; }
static void r300_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index, const struct pipe_constant_buffer *buf) { struct r300_context* r300 = r300_context(pipe); void *mapped; if (buf == NULL || buf->buffer->size == 0 || (mapped = pipe_buffer_map(pipe->screen, buf->buffer, PIPE_BUFFER_USAGE_CPU_READ)) == NULL) { r300->shader_constants[shader].count = 0; return; } assert((buf->buffer->size % (4 * sizeof(float))) == 0); memcpy(r300->shader_constants[shader].constants, mapped, buf->buffer->size); r300->shader_constants[shader].count = buf->buffer->size / (4 * sizeof(float)); pipe_buffer_unmap(pipe->screen, buf->buffer); if (shader == PIPE_SHADER_VERTEX) r300->dirty_state |= R300_NEW_VERTEX_SHADER_CONSTANTS; else if (shader == PIPE_SHADER_FRAGMENT) r300->dirty_state |= R300_NEW_FRAGMENT_SHADER_CONSTANTS; }
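/* Illustrative sketch (not taken from the snippets above): both emit_consts and
 * r300_set_constant_buffer treat a constant buffer as an array of vec4s, i.e.
 * 4 floats (16 bytes) per constant, so the constant count follows directly from
 * the byte size. The helper name and the example size are assumptions made
 * purely for the arithmetic. */
#include <assert.h>
static unsigned const_count_from_bytes(unsigned bytes)
{
   assert(bytes % (4 * sizeof(float)) == 0);   /* buffer must hold whole vec4s */
   return bytes / (4 * sizeof(float));         /* e.g. 256 bytes -> 16 vec4 constants */
}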
static void * svga_vbuf_render_map_vertices( struct vbuf_render *render ) { struct svga_vbuf_render *svga_render = svga_vbuf_render(render); struct svga_context *svga = svga_render->svga; if (svga_render->vbuf) { char *ptr = (char*)pipe_buffer_map(&svga->pipe, svga_render->vbuf, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT | PIPE_TRANSFER_DISCARD | PIPE_TRANSFER_UNSYNCHRONIZED, &svga_render->vbuf_transfer); if (ptr) return ptr + svga_render->vbuf_offset; else return NULL; } else { /* we probably ran out of memory when allocating the vertex buffer */ return NULL; } }
/** * Called by VBO to draw arrays when in selection or feedback mode and * to implement glRasterPos. * This is very much like the normal draw_vbo() function above. * Look at code refactoring some day. */ void st_feedback_draw_vbo(struct gl_context *ctx, const struct gl_client_array **arrays, const struct _mesa_prim *prims, GLuint nr_prims, const struct _mesa_index_buffer *ib, GLboolean index_bounds_valid, GLuint min_index, GLuint max_index, struct gl_transform_feedback_object *tfb_vertcount) { struct st_context *st = st_context(ctx); struct pipe_context *pipe = st->pipe; struct draw_context *draw = st->draw; const struct st_vertex_program *vp; const struct pipe_shader_state *vs; struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS]; struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS]; struct pipe_index_buffer ibuffer; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS]; struct pipe_transfer *ib_transfer = NULL; GLuint attr, i; const GLubyte *low_addr = NULL; const void *mapped_indices = NULL; assert(draw); st_validate_state(st); if (!index_bounds_valid) vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims); /* must get these after state validation! */ vp = st->vp; vs = &st->vp_variant->tgsi; if (!st->vp_variant->draw_shader) { st->vp_variant->draw_shader = draw_create_vertex_shader(draw, vs); } /* * Set up the draw module's state. * * We'd like to do this less frequently, but the normal state-update * code sends state updates to the pipe, not to our private draw module. */ assert(draw); draw_set_viewport_state(draw, &st->state.viewport); draw_set_clip_state(draw, &st->state.clip); draw_set_rasterizer_state(draw, &st->state.rasterizer, NULL); draw_bind_vertex_shader(draw, st->vp_variant->draw_shader); set_feedback_vertex_format(ctx); /* Find the lowest address of the arrays we're drawing */ if (vp->num_inputs) { low_addr = arrays[vp->index_to_input[0]]->Ptr; for (attr = 1; attr < vp->num_inputs; attr++) { const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr; low_addr = MIN2(low_addr, start); } } /* loop over TGSI shader inputs to determine vertex buffer * and attribute info */ for (attr = 0; attr < vp->num_inputs; attr++) { const GLuint mesaAttr = vp->index_to_input[attr]; struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj; void *map; if (bufobj && bufobj->Name) { /* Attribute data is in a VBO. * Recall that for VBOs, the gl_client_array->Ptr field is * really an offset from the start of the VBO, not a pointer. 
*/ struct st_buffer_object *stobj = st_buffer_object(bufobj); assert(stobj->buffer); vbuffers[attr].buffer = NULL; pipe_resource_reference(&vbuffers[attr].buffer, stobj->buffer); vbuffers[attr].buffer_offset = pointer_to_offset(low_addr); velements[attr].src_offset = arrays[mesaAttr]->Ptr - low_addr; } else { /* attribute data is in user-space memory, not a VBO */ uint bytes = (arrays[mesaAttr]->Size * _mesa_sizeof_type(arrays[mesaAttr]->Type) * (max_index + 1)); /* wrap user data */ vbuffers[attr].buffer = pipe_user_buffer_create(pipe->screen, (void *) arrays[mesaAttr]->Ptr, bytes, PIPE_BIND_VERTEX_BUFFER); vbuffers[attr].buffer_offset = 0; velements[attr].src_offset = 0; } /* common-case setup */ vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */ velements[attr].instance_divisor = 0; velements[attr].vertex_buffer_index = attr; velements[attr].src_format = st_pipe_vertex_format(arrays[mesaAttr]->Type, arrays[mesaAttr]->Size, arrays[mesaAttr]->Format, arrays[mesaAttr]->Normalized, arrays[mesaAttr]->Integer); assert(velements[attr].src_format); /* tell draw about this attribute */ #if 0 draw_set_vertex_buffer(draw, attr, &vbuffer[attr]); #endif /* map the attrib buffer */ map = pipe_buffer_map(pipe, vbuffers[attr].buffer, PIPE_TRANSFER_READ, &vb_transfer[attr]); draw_set_mapped_vertex_buffer(draw, attr, map); } draw_set_vertex_buffers(draw, vp->num_inputs, vbuffers); draw_set_vertex_elements(draw, vp->num_inputs, velements); memset(&ibuffer, 0, sizeof(ibuffer)); if (ib) { struct gl_buffer_object *bufobj = ib->obj; ibuffer.index_size = vbo_sizeof_ib_type(ib->type); if (ibuffer.index_size == 0) goto out_unref_vertex; if (bufobj && bufobj->Name) { struct st_buffer_object *stobj = st_buffer_object(bufobj); pipe_resource_reference(&ibuffer.buffer, stobj->buffer); ibuffer.offset = pointer_to_offset(ib->ptr); mapped_indices = pipe_buffer_map(pipe, stobj->buffer, PIPE_TRANSFER_READ, &ib_transfer); } else { /* skip setting ibuffer.buffer as the draw module does not use it */ mapped_indices = ib->ptr; } draw_set_index_buffer(draw, &ibuffer); draw_set_mapped_index_buffer(draw, mapped_indices); } /* set the constant buffer */ draw_set_mapped_constant_buffer(st->draw, PIPE_SHADER_VERTEX, 0, st->state.constants[PIPE_SHADER_VERTEX].ptr, st->state.constants[PIPE_SHADER_VERTEX].size); /* draw here */ for (i = 0; i < nr_prims; i++) { draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count); } /* * unmap vertex/index buffers */ if (ib) { draw_set_mapped_index_buffer(draw, NULL); draw_set_index_buffer(draw, NULL); if (ib_transfer) pipe_buffer_unmap(pipe, ib_transfer); pipe_resource_reference(&ibuffer.buffer, NULL); } out_unref_vertex: for (attr = 0; attr < vp->num_inputs; attr++) { pipe_buffer_unmap(pipe, vb_transfer[attr]); draw_set_mapped_vertex_buffer(draw, attr, NULL); pipe_resource_reference(&vbuffers[attr].buffer, NULL); } draw_set_vertex_buffers(draw, 0, NULL); }
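/* Worked example (illustrative, numbers assumed): the user-buffer wrapping in
 * st_feedback_draw_vbo above sizes the wrapper as
 *    bytes = Size * sizeof(Type) * (max_index + 1),
 * i.e. enough to cover every element up to the highest vertex index used by
 * the draw. For a 3-component GL_FLOAT attribute with max_index == 99:
 *    bytes = 3 * 4 * (99 + 1) = 1200 bytes. */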
int main(int argc, char **argv) { struct fbdemos_scaffold *fbs = 0; fbdemo_init(&fbs); int width = fbs->width; int height = fbs->height; struct pipe_context *pipe = fbs->pipe; /* resources */ struct pipe_resource *rt_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_B8G8R8X8_UNORM, width, height, 0); struct pipe_resource *z_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_S8_UINT_Z24_UNORM, width, height, 0); struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); struct pipe_resource *idx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); /* bind render target to framebuffer */ etna_fb_bind_resource(fbs, rt_resource); /* vertex / index buffer setup */ struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); assert(vtx_logical); for(int vert=0; vert<NUM_VERTICES; ++vert) { int dest_idx = vert * 3; for(int comp=0; comp<3; ++comp) vtx_logical[dest_idx+comp+0] = vVertices[vert*3 + comp]; /* 0 */ } pipe_buffer_unmap(pipe, vtx_transfer); struct pipe_transfer *idx_transfer = 0; void *idx_logical = pipe_buffer_map(pipe, idx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &idx_transfer); assert(idx_logical); memcpy(idx_logical, indices, sizeof(indices)); pipe_buffer_unmap(pipe, idx_transfer); struct pipe_vertex_buffer vertex_buf_desc = { .stride = (3)*4, .buffer_offset = 0, .buffer = vtx_resource, .user_buffer = 0 }; struct pipe_vertex_element pipe_vertex_elements[] = { { /* positions */ .src_offset = 0, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32B32_FLOAT }, }; void *vertex_elements = pipe->create_vertex_elements_state(pipe, sizeof(pipe_vertex_elements)/sizeof(pipe_vertex_elements[0]), pipe_vertex_elements); struct pipe_index_buffer index_buf_desc = { .index_size = 1, .offset = 0, .buffer = idx_resource, .user_buffer = 0 }; /* compile gallium3d states */ void *blend = pipe->create_blend_state(pipe, &(struct pipe_blend_state) { .rt[0] = { .blend_enable = 0, .rgb_func = PIPE_BLEND_ADD, .rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA, .rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA, .alpha_func = PIPE_BLEND_ADD, .alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA, .alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA, .colormask = 0xf } }); void *sampler = pipe->create_sampler_state(pipe, &(struct pipe_sampler_state) { .wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE, .wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE, .wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE, .min_img_filter = PIPE_TEX_FILTER_LINEAR, .min_mip_filter = PIPE_TEX_MIPFILTER_LINEAR, .mag_img_filter = PIPE_TEX_FILTER_LINEAR, .normalized_coords = 1, .lod_bias = 0.0f, .min_lod = 0.0f, .max_lod=1000.0f });
void nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) { struct nv30_context *nv30 = nv30_context(pipe); struct draw_context *draw = nv30->draw; struct pipe_transfer *transfer[PIPE_MAX_ATTRIBS] = {NULL}; struct pipe_transfer *transferi = NULL; int i; nv30_render_validate(nv30); if (nv30->draw_dirty & NV30_NEW_VIEWPORT) draw_set_viewport_states(draw, 0, 1, &nv30->viewport); if (nv30->draw_dirty & NV30_NEW_RASTERIZER) draw_set_rasterizer_state(draw, &nv30->rast->pipe, NULL); if (nv30->draw_dirty & NV30_NEW_CLIP) draw_set_clip_state(draw, &nv30->clip); if (nv30->draw_dirty & NV30_NEW_ARRAYS) { draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, nv30->vtxbuf); draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe); } if (nv30->draw_dirty & NV30_NEW_FRAGPROG) { struct nv30_fragprog *fp = nv30->fragprog.program; if (!fp->draw) fp->draw = draw_create_fragment_shader(draw, &fp->pipe); draw_bind_fragment_shader(draw, fp->draw); } if (nv30->draw_dirty & NV30_NEW_VERTPROG) { struct nv30_vertprog *vp = nv30->vertprog.program; if (!vp->draw) vp->draw = draw_create_vertex_shader(draw, &vp->pipe); draw_bind_vertex_shader(draw, vp->draw); } if (nv30->draw_dirty & NV30_NEW_VERTCONST) { if (nv30->vertprog.constbuf) { void *map = nv04_resource(nv30->vertprog.constbuf)->data; draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, map, nv30->vertprog.constbuf_nr * 16); } else { draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0); } } for (i = 0; i < nv30->num_vtxbufs; i++) { const void *map = nv30->vtxbuf[i].user_buffer; if (!map) { if (nv30->vtxbuf[i].buffer) map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer, PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ, &transfer[i]); } draw_set_mapped_vertex_buffer(draw, i, map, ~0); } if (info->indexed) { const void *map = nv30->idxbuf.user_buffer; if (!map) map = pipe_buffer_map(pipe, nv30->idxbuf.buffer, PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_READ, &transferi); draw_set_indexes(draw, (ubyte *) map + nv30->idxbuf.offset, nv30->idxbuf.index_size, ~0); } else { draw_set_indexes(draw, NULL, 0, 0); } draw_vbo(draw, info); draw_flush(draw); if (info->indexed && transferi) pipe_buffer_unmap(pipe, transferi); for (i = 0; i < nv30->num_vtxbufs; i++) if (transfer[i]) pipe_buffer_unmap(pipe, transfer[i]); nv30->draw_dirty = 0; nv30_state_release(nv30); }
static void st_DrawTex(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height) { struct st_context *st = ctx->st; struct pipe_context *pipe = st->pipe; struct cso_context *cso = ctx->st->cso_context; struct pipe_resource *vbuffer; struct pipe_transfer *vbuffer_transfer; GLuint i, numTexCoords, numAttribs; GLboolean emitColor; uint semantic_names[2 + MAX_TEXTURE_UNITS]; uint semantic_indexes[2 + MAX_TEXTURE_UNITS]; struct pipe_vertex_element velements[2 + MAX_TEXTURE_UNITS]; GLbitfield inputs = VERT_BIT_POS; st_validate_state(st); /* determine if we need vertex color */ if (ctx->FragmentProgram._Current->Base.InputsRead & FRAG_BIT_COL0) emitColor = GL_TRUE; else emitColor = GL_FALSE; /* determine how many enabled sets of texcoords */ numTexCoords = 0; for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_2D_BIT) { inputs |= VERT_BIT_TEX(i); numTexCoords++; } } /* total number of attributes per vertex */ numAttribs = 1 + emitColor + numTexCoords; /* create the vertex buffer */ vbuffer = pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STREAM, numAttribs * 4 * 4 * sizeof(GLfloat)); /* load vertex buffer */ { #define SET_ATTRIB(VERT, ATTR, X, Y, Z, W) \ do { \ GLuint k = (((VERT) * numAttribs + (ATTR)) * 4); \ assert(k < 4 * 4 * numAttribs); \ vbuf[k + 0] = X; \ vbuf[k + 1] = Y; \ vbuf[k + 2] = Z; \ vbuf[k + 3] = W; \ } while (0) const GLfloat x0 = x, y0 = y, x1 = x + width, y1 = y + height; GLfloat *vbuf = (GLfloat *) pipe_buffer_map(pipe, vbuffer, PIPE_TRANSFER_WRITE, &vbuffer_transfer); GLuint attr; z = CLAMP(z, 0.0f, 1.0f); /* positions (in clip coords) */ { const struct gl_framebuffer *fb = st->ctx->DrawBuffer; const GLfloat fb_width = (GLfloat)fb->Width; const GLfloat fb_height = (GLfloat)fb->Height; const GLfloat clip_x0 = (GLfloat)(x0 / fb_width * 2.0 - 1.0); const GLfloat clip_y0 = (GLfloat)(y0 / fb_height * 2.0 - 1.0); const GLfloat clip_x1 = (GLfloat)(x1 / fb_width * 2.0 - 1.0); const GLfloat clip_y1 = (GLfloat)(y1 / fb_height * 2.0 - 1.0); SET_ATTRIB(0, 0, clip_x0, clip_y0, z, 1.0f); /* lower left */ SET_ATTRIB(1, 0, clip_x1, clip_y0, z, 1.0f); /* lower right */ SET_ATTRIB(2, 0, clip_x1, clip_y1, z, 1.0f); /* upper right */ SET_ATTRIB(3, 0, clip_x0, clip_y1, z, 1.0f); /* upper left */ semantic_names[0] = TGSI_SEMANTIC_POSITION; semantic_indexes[0] = 0; } /* colors */ if (emitColor) { const GLfloat *c = ctx->Current.Attrib[VERT_ATTRIB_COLOR0]; SET_ATTRIB(0, 1, c[0], c[1], c[2], c[3]); SET_ATTRIB(1, 1, c[0], c[1], c[2], c[3]); SET_ATTRIB(2, 1, c[0], c[1], c[2], c[3]); SET_ATTRIB(3, 1, c[0], c[1], c[2], c[3]); semantic_names[1] = TGSI_SEMANTIC_COLOR; semantic_indexes[1] = 0; attr = 2; } else { attr = 1; } /* texcoords */ for (i = 0; i < ctx->Const.MaxTextureUnits; i++) { if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_2D_BIT) { struct gl_texture_object *obj = ctx->Texture.Unit[i]._Current; struct gl_texture_image *img = obj->Image[0][obj->BaseLevel]; const GLfloat wt = (GLfloat) img->Width; const GLfloat ht = (GLfloat) img->Height; const GLfloat s0 = obj->CropRect[0] / wt; const GLfloat t0 = obj->CropRect[1] / ht; const GLfloat s1 = (obj->CropRect[0] + obj->CropRect[2]) / wt; const GLfloat t1 = (obj->CropRect[1] + obj->CropRect[3]) / ht; /*printf("crop texcoords: %g, %g .. 
%g, %g\n", s0, t0, s1, t1);*/ SET_ATTRIB(0, attr, s0, t0, 0.0f, 1.0f); /* lower left */ SET_ATTRIB(1, attr, s1, t0, 0.0f, 1.0f); /* lower right */ SET_ATTRIB(2, attr, s1, t1, 0.0f, 1.0f); /* upper right */ SET_ATTRIB(3, attr, s0, t1, 0.0f, 1.0f); /* upper left */ semantic_names[attr] = TGSI_SEMANTIC_GENERIC; semantic_indexes[attr] = 0; attr++; } } pipe_buffer_unmap(pipe, vbuffer_transfer); #undef SET_ATTRIB } cso_save_viewport(cso); cso_save_vertex_shader(cso); cso_save_vertex_elements(cso); cso_save_vertex_buffers(cso); { void *vs = lookup_shader(pipe, numAttribs, semantic_names, semantic_indexes); cso_set_vertex_shader_handle(cso, vs); } for (i = 0; i < numAttribs; i++) { velements[i].src_offset = i * 4 * sizeof(float); velements[i].instance_divisor = 0; velements[i].vertex_buffer_index = 0; velements[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT; } cso_set_vertex_elements(cso, numAttribs, velements); /* viewport state: viewport matching window dims */ { const struct gl_framebuffer *fb = st->ctx->DrawBuffer; const GLboolean invert = (st_fb_orientation(fb) == Y_0_TOP); const GLfloat width = (GLfloat)fb->Width; const GLfloat height = (GLfloat)fb->Height; struct pipe_viewport_state vp; vp.scale[0] = 0.5f * width; vp.scale[1] = height * (invert ? -0.5f : 0.5f); vp.scale[2] = 1.0f; vp.scale[3] = 1.0f; vp.translate[0] = 0.5f * width; vp.translate[1] = 0.5f * height; vp.translate[2] = 0.0f; vp.translate[3] = 0.0f; cso_set_viewport(cso, &vp); } util_draw_vertex_buffer(pipe, cso, vbuffer, 0, /* offset */ PIPE_PRIM_TRIANGLE_FAN, 4, /* verts */ numAttribs); /* attribs/vert */ pipe_resource_reference(&vbuffer, NULL); /* restore state */ cso_restore_viewport(cso); cso_restore_vertex_shader(cso); cso_restore_vertex_elements(cso); cso_restore_vertex_buffers(cso); }
int main(int argc, char **argv) { struct fbdemos_scaffold *fbs = 0; fbdemo_init(&fbs); int width = fbs->width; int height = fbs->height; struct pipe_context *pipe = fbs->pipe; /* resources */ struct pipe_resource *rt_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_B8G8R8X8_UNORM, width, height, 0); struct pipe_resource *z_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_Z16_UNORM, width, height, 0); struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); struct pipe_resource *idx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); /* bind render target to framebuffer */ etna_fb_bind_resource(&fbs->fb, rt_resource); /* Phew, now we got all the memory we need. * Write interleaved attribute vertex stream. * Unlike the GL example we only do this once, not every time glDrawArrays is called, the same would be accomplished * from GL by using a vertex buffer object. */ float *vVertices; float *vNormals; float *vTexCoords; uint16_t *vIndices; int numVertices = 0; int numIndices = esGenSphere(80, 1.0f, &vVertices, &vNormals, &vTexCoords, &vIndices, &numVertices); unsigned vtxStride = 3+3+2; assert((numVertices * vtxStride*4) < VERTEX_BUFFER_SIZE); struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); for(int vert=0; vert<numVertices; ++vert) { int dest_idx = vert * vtxStride; for(int comp=0; comp<3; ++comp) vtx_logical[dest_idx+comp+0] = vVertices[vert*3 + comp]; /* 0 */ for(int comp=0; comp<3; ++comp) vtx_logical[dest_idx+comp+3] = vNormals[vert*3 + comp]; /* 1 */ for(int comp=0; comp<2; ++comp) vtx_logical[dest_idx+comp+6] = vTexCoords[vert*2 + comp]; /* 2 */ } pipe_buffer_unmap(pipe, vtx_transfer); assert((numIndices * 2) < VERTEX_BUFFER_SIZE); struct pipe_transfer *idx_transfer = 0; void *idx_logical = pipe_buffer_map(pipe, idx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &idx_transfer); memcpy(idx_logical, vIndices, numIndices*sizeof(uint16_t)); pipe_buffer_unmap(pipe, idx_transfer); /* compile gallium3d states */ void *blend = pipe->create_blend_state(pipe, &(struct pipe_blend_state) { .rt[0] = { .blend_enable = 0, .rgb_func = PIPE_BLEND_ADD, .rgb_src_factor = PIPE_BLENDFACTOR_ONE, .rgb_dst_factor = PIPE_BLENDFACTOR_ZERO, .alpha_func = PIPE_BLEND_ADD, .alpha_src_factor = PIPE_BLENDFACTOR_ONE, .alpha_dst_factor = PIPE_BLENDFACTOR_ZERO, .colormask = 0xf } });
/** * Draw vertex arrays, with optional indexing. * Basically, map the vertex buffers (and drawing surfaces), then hand off * the drawing to the 'draw' module. * * XXX should the element buffer be specified/bound with a separate function? */ static boolean cell_draw_range_elements(struct pipe_context *pipe, struct pipe_buffer *indexBuffer, unsigned indexSize, unsigned min_index, unsigned max_index, unsigned mode, unsigned start, unsigned count) { struct cell_context *sp = cell_context(pipe); struct draw_context *draw = sp->draw; unsigned i; if (sp->dirty) cell_update_derived( sp ); #if 0 cell_map_surfaces(sp); #endif cell_map_constant_buffers(sp); /* * Map vertex buffers */ for (i = 0; i < sp->num_vertex_buffers; i++) { void *buf = pipe_buffer_map(pipe->screen, sp->vertex_buffer[i].buffer, PIPE_BUFFER_USAGE_CPU_READ); cell_flush_buffer_range(sp, buf, sp->vertex_buffer[i].buffer->size); draw_set_mapped_vertex_buffer(draw, i, buf); } /* Map index buffer, if present */ if (indexBuffer) { void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes); } else { /* no index/element buffer */ draw_set_mapped_element_buffer(draw, 0, NULL); } /* draw! */ draw_arrays(draw, mode, start, count); /* * unmap vertex/index buffers - will cause draw module to flush */ for (i = 0; i < sp->num_vertex_buffers; i++) { draw_set_mapped_vertex_buffer(draw, i, NULL); pipe_buffer_unmap(pipe->screen, sp->vertex_buffer[i].buffer); } if (indexBuffer) { draw_set_mapped_element_buffer(draw, 0, NULL); pipe_buffer_unmap(pipe->screen, indexBuffer); } /* Note: leave drawing surfaces mapped */ cell_unmap_constant_buffers(sp); return TRUE; }
int main(int argc, char **argv) { struct fbdemos_scaffold *fbs = 0; fbdemo_init(&fbs); int width = fbs->width; int height = fbs->height; struct pipe_context *pipe = fbs->pipe; dds_texture *dds = 0; if(argc<2 || !dds_load(argv[1], &dds)) { printf("Error loading texture\n"); exit(1); } uint32_t tex_format = 0; uint32_t tex_base_width = dds->slices[0][0].width; uint32_t tex_base_height = dds->slices[0][0].height; switch(dds->fmt) { case FMT_A8R8G8B8: tex_format = PIPE_FORMAT_B8G8R8A8_UNORM; break; case FMT_X8R8G8B8: tex_format = PIPE_FORMAT_B8G8R8X8_UNORM; break; case FMT_DXT1: tex_format = PIPE_FORMAT_DXT1_RGB; break; case FMT_DXT3: tex_format = PIPE_FORMAT_DXT3_RGBA; break; case FMT_DXT5: tex_format = PIPE_FORMAT_DXT5_RGBA; break; case FMT_ETC1: tex_format = PIPE_FORMAT_ETC1_RGB8; break; case FMT_A8: tex_format = PIPE_FORMAT_A8_UNORM; break; case FMT_L8: tex_format = PIPE_FORMAT_L8_UNORM; break; case FMT_A8L8: tex_format = PIPE_FORMAT_L8A8_UNORM; break; default: printf("Unknown texture format\n"); exit(1); } struct pipe_resource *tex_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_SAMPLER_VIEW, tex_format, tex_base_width, tex_base_height, dds->num_mipmaps - 1); printf("Loading compressed texture (format %i, %ix%i)\n", dds->fmt, tex_base_width, tex_base_height); for(int ix=0; ix<dds->num_mipmaps; ++ix) { printf("%08x: Uploading mipmap %i (%ix%i)\n", dds->slices[0][ix].offset, ix, dds->slices[0][ix].width, dds->slices[0][ix].height); etna_pipe_inline_write(pipe, tex_resource, 0, ix, dds->slices[0][ix].data, dds->slices[0][ix].size); } /* resources */ struct pipe_resource *rt_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_B8G8R8X8_UNORM, width, height, 0); struct pipe_resource *z_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_Z16_UNORM, width, height, 0); struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); /* bind render target to framebuffer */ etna_fb_bind_resource(&fbs->fb, rt_resource); /* Phew, now we got all the memory we need. * Write interleaved attribute vertex stream. * Unlike the GL example we only do this once, not every time glDrawArrays is called, the same would be accomplished * from GL by using a vertex buffer object. */ struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); assert(vtx_logical); for(int vert=0; vert<NUM_VERTICES; ++vert) { int dest_idx = vert * (3 + 3 + 2); for(int comp=0; comp<3; ++comp) vtx_logical[dest_idx+comp+0] = vVertices[vert*3 + comp]; /* 0 */ for(int comp=0; comp<3; ++comp) vtx_logical[dest_idx+comp+3] = vNormals[vert*3 + comp]; /* 1 */ for(int comp=0; comp<2; ++comp) vtx_logical[dest_idx+comp+6] = vTexCoords[vert*2 + comp]; /* 2 */ } pipe_buffer_unmap(pipe, vtx_transfer); /* compile gallium3d states */ void *blend = NULL; if(tex_format == PIPE_FORMAT_A8_UNORM || tex_format == PIPE_FORMAT_L8A8_UNORM) /* if alpha texture, enable blending */ { blend = pipe->create_blend_state(pipe, &(struct pipe_blend_state) { .rt[0] = { .blend_enable = 1, .rgb_func = PIPE_BLEND_ADD, .rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA, .rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA, .alpha_func = PIPE_BLEND_ADD, .alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA, .alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA, .colormask = 0xf } });
static void r300_emit_draw_arrays_immediate(struct r300_context *r300, unsigned mode, unsigned start, unsigned count) { struct pipe_vertex_element* velem; struct pipe_vertex_buffer* vbuf; unsigned vertex_element_count = r300->velems->count; unsigned i, v, vbi; /* Size of the vertex, in dwords. */ unsigned vertex_size = r300->velems->vertex_size_dwords; /* The number of dwords for this draw operation. */ unsigned dwords = 9 + count * vertex_size; /* Size of the vertex element, in dwords. */ unsigned size[PIPE_MAX_ATTRIBS]; /* Stride to the same attrib in the next vertex in the vertex buffer, * in dwords. */ unsigned stride[PIPE_MAX_ATTRIBS]; /* Mapped vertex buffers. */ uint32_t* map[PIPE_MAX_ATTRIBS]; uint32_t* mapelem[PIPE_MAX_ATTRIBS]; struct pipe_transfer* transfer[PIPE_MAX_ATTRIBS] = {0}; CS_LOCALS(r300); if (!r300_prepare_for_rendering(r300, PREP_FIRST_DRAW, NULL, dwords, 0, 0)) return; /* Calculate the vertex size, offsets, strides etc. and map the buffers. */ for (i = 0; i < vertex_element_count; i++) { velem = &r300->velems->velem[i]; size[i] = r300->velems->hw_format_size[i] / 4; vbi = velem->vertex_buffer_index; vbuf = &r300->vertex_buffer[vbi]; stride[i] = vbuf->stride / 4; /* Map the buffer. */ if (!transfer[vbi]) { map[vbi] = (uint32_t*)pipe_buffer_map(&r300->context, vbuf->buffer, PIPE_TRANSFER_READ, &transfer[vbi]); map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * start; } mapelem[i] = map[vbi] + (velem->src_offset / 4); } BEGIN_CS(dwords); OUT_CS_REG(R300_GA_COLOR_CONTROL, r300_provoking_vertex_fixes(r300, mode)); OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size); OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2); OUT_CS(count - 1); OUT_CS(0); OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, count * vertex_size); OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (count << 16) | r300_translate_primitive(mode)); /* Emit vertices. */ for (v = 0; v < count; v++) { for (i = 0; i < vertex_element_count; i++) { OUT_CS_TABLE(&mapelem[i][stride[i] * v], size[i]); } } END_CS; /* Unmap buffers. */ for (i = 0; i < vertex_element_count; i++) { vbi = r300->velems->velem[i].vertex_buffer_index; if (transfer[vbi]) { vbuf = &r300->vertex_buffer[vbi]; pipe_buffer_unmap(&r300->context, vbuf->buffer, transfer[vbi]); transfer[vbi] = NULL; } } }
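/* Worked example (illustrative; assumes each OUT_CS_REG write is a one-dword
 * header plus the register value): the "dwords = 9 + count * vertex_size"
 * estimate above matches the emission that follows it: 2 dwords for
 * R300_GA_COLOR_CONTROL, 2 for R300_VAP_VTX_SIZE, 3 for the
 * R300_VAP_VF_MAX_VTX_INDX sequence (header + 2 values), 1 for the
 * 3D_DRAW_IMMD_2 packet header and 1 for VAP_VF_CNTL, i.e. 9 header dwords,
 * followed by the embedded vertex data. For count = 3 vertices of
 * vertex_size = 4 dwords that is 9 + 3*4 = 21 dwords. */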
/* SW TCL elements, using Draw. */ static void r300_swtcl_draw_vbo(struct pipe_context* pipe, const struct pipe_draw_info *info) { struct r300_context* r300 = r300_context(pipe); struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS]; struct pipe_transfer *ib_transfer = NULL; unsigned count = info->count; int i; void *indices = NULL; boolean indexed = info->indexed && r300->index_buffer.buffer; if (r300->skip_rendering) { return; } if (!u_trim_pipe_prim(info->mode, &count)) { return; } r300_update_derived_state(r300); r300_reserve_cs_dwords(r300, PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL | (indexed ? PREP_INDEXED : 0), indexed ? 256 : 6); for (i = 0; i < r300->vertex_buffer_count; i++) { if (r300->vertex_buffer[i].buffer) { void *buf = pipe_buffer_map(pipe, r300->vertex_buffer[i].buffer, PIPE_TRANSFER_READ, &vb_transfer[i]); draw_set_mapped_vertex_buffer(r300->draw, i, buf); } } if (indexed) { indices = pipe_buffer_map(pipe, r300->index_buffer.buffer, PIPE_TRANSFER_READ, &ib_transfer); } draw_set_mapped_index_buffer(r300->draw, indices); r300->draw_vbo_locked = TRUE; r300->draw_first_emitted = FALSE; draw_vbo(r300->draw, info); draw_flush(r300->draw); r300->draw_vbo_locked = FALSE; for (i = 0; i < r300->vertex_buffer_count; i++) { if (r300->vertex_buffer[i].buffer) { pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer, vb_transfer[i]); draw_set_mapped_vertex_buffer(r300->draw, i, NULL); } } if (indexed) { pipe_buffer_unmap(pipe, r300->index_buffer.buffer, ib_transfer); draw_set_mapped_index_buffer(r300->draw, NULL); } }
struct pipe_context * dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe) { struct dd_context *dctx; if (!pipe) return NULL; dctx = CALLOC_STRUCT(dd_context); if (!dctx) goto fail; dctx->pipe = pipe; dctx->base.priv = pipe->priv; /* expose wrapped priv data */ dctx->base.screen = &dscreen->base; dctx->base.stream_uploader = pipe->stream_uploader; dctx->base.const_uploader = pipe->const_uploader; dctx->base.destroy = dd_context_destroy; CTX_INIT(render_condition); CTX_INIT(create_query); CTX_INIT(create_batch_query); CTX_INIT(destroy_query); CTX_INIT(begin_query); CTX_INIT(end_query); CTX_INIT(get_query_result); CTX_INIT(set_active_query_state); CTX_INIT(create_blend_state); CTX_INIT(bind_blend_state); CTX_INIT(delete_blend_state); CTX_INIT(create_sampler_state); CTX_INIT(bind_sampler_states); CTX_INIT(delete_sampler_state); CTX_INIT(create_rasterizer_state); CTX_INIT(bind_rasterizer_state); CTX_INIT(delete_rasterizer_state); CTX_INIT(create_depth_stencil_alpha_state); CTX_INIT(bind_depth_stencil_alpha_state); CTX_INIT(delete_depth_stencil_alpha_state); CTX_INIT(create_fs_state); CTX_INIT(bind_fs_state); CTX_INIT(delete_fs_state); CTX_INIT(create_vs_state); CTX_INIT(bind_vs_state); CTX_INIT(delete_vs_state); CTX_INIT(create_gs_state); CTX_INIT(bind_gs_state); CTX_INIT(delete_gs_state); CTX_INIT(create_tcs_state); CTX_INIT(bind_tcs_state); CTX_INIT(delete_tcs_state); CTX_INIT(create_tes_state); CTX_INIT(bind_tes_state); CTX_INIT(delete_tes_state); CTX_INIT(create_compute_state); CTX_INIT(bind_compute_state); CTX_INIT(delete_compute_state); CTX_INIT(create_vertex_elements_state); CTX_INIT(bind_vertex_elements_state); CTX_INIT(delete_vertex_elements_state); CTX_INIT(set_blend_color); CTX_INIT(set_stencil_ref); CTX_INIT(set_sample_mask); CTX_INIT(set_min_samples); CTX_INIT(set_clip_state); CTX_INIT(set_constant_buffer); CTX_INIT(set_framebuffer_state); CTX_INIT(set_polygon_stipple); CTX_INIT(set_scissor_states); CTX_INIT(set_viewport_states); CTX_INIT(set_sampler_views); CTX_INIT(set_tess_state); CTX_INIT(set_shader_buffers); CTX_INIT(set_shader_images); CTX_INIT(set_vertex_buffers); CTX_INIT(create_stream_output_target); CTX_INIT(stream_output_target_destroy); CTX_INIT(set_stream_output_targets); CTX_INIT(create_sampler_view); CTX_INIT(sampler_view_destroy); CTX_INIT(create_surface); CTX_INIT(surface_destroy); CTX_INIT(transfer_map); CTX_INIT(transfer_flush_region); CTX_INIT(transfer_unmap); CTX_INIT(buffer_subdata); CTX_INIT(texture_subdata); CTX_INIT(texture_barrier); CTX_INIT(memory_barrier); CTX_INIT(resource_commit); /* create_video_codec */ /* create_video_buffer */ /* set_compute_resources */ /* set_global_binding */ CTX_INIT(get_sample_position); CTX_INIT(invalidate_resource); CTX_INIT(get_device_reset_status); CTX_INIT(set_device_reset_callback); CTX_INIT(dump_debug_state); CTX_INIT(emit_string_marker); CTX_INIT(create_texture_handle); CTX_INIT(delete_texture_handle); CTX_INIT(make_texture_handle_resident); CTX_INIT(create_image_handle); CTX_INIT(delete_image_handle); CTX_INIT(make_image_handle_resident); dd_init_draw_functions(dctx); u_log_context_init(&dctx->log); if (pipe->set_log_context) pipe->set_log_context(pipe, &dctx->log); dctx->draw_state.sample_mask = ~0; if (dscreen->mode == DD_DETECT_HANGS_PIPELINED) { dctx->fence = pipe_buffer_create(dscreen->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 4); if (!dctx->fence) goto fail; dctx->mapped_fence = pipe_buffer_map(pipe, dctx->fence, PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_PERSISTENT | 
PIPE_TRANSFER_COHERENT, &dctx->fence_transfer); if (!dctx->mapped_fence) goto fail; *dctx->mapped_fence = 0; (void) mtx_init(&dctx->mutex, mtx_plain); dctx->thread = u_thread_create(dd_thread_pipelined_hang_detect, dctx); if (!dctx->thread) { mtx_destroy(&dctx->mutex); goto fail; } } return &dctx->base; fail: if (dctx) { if (dctx->mapped_fence) pipe_transfer_unmap(pipe, dctx->fence_transfer); pipe_resource_reference(&dctx->fence, NULL); FREE(dctx); } pipe->destroy(pipe); return NULL; }
void vl_compositor_render(struct vl_compositor *compositor, /*struct pipe_texture *backround, struct pipe_video_rect *backround_area,*/ struct pipe_texture *src_surface, enum pipe_mpeg12_picture_type picture_type, /*unsigned num_past_surfaces, struct pipe_texture *past_surfaces, unsigned num_future_surfaces, struct pipe_texture *future_surfaces,*/ struct pipe_video_rect *src_area, struct pipe_texture *dst_surface, struct pipe_video_rect *dst_area, /*unsigned num_layers, struct pipe_texture *layers, struct pipe_video_rect *layer_src_areas, struct pipe_video_rect *layer_dst_areas*/ struct pipe_fence_handle **fence) { struct vertex_shader_consts *vs_consts; assert(compositor); assert(src_surface); assert(src_area); assert(dst_surface); assert(dst_area); assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME); compositor->fb_state.width = dst_surface->width[0]; compositor->fb_state.height = dst_surface->height[0]; compositor->fb_state.cbufs[0] = compositor->pipe->screen->get_tex_surface ( compositor->pipe->screen, dst_surface, 0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE ); compositor->viewport.scale[0] = compositor->fb_state.width; compositor->viewport.scale[1] = compositor->fb_state.height; compositor->viewport.scale[2] = 1; compositor->viewport.scale[3] = 1; compositor->viewport.translate[0] = 0; compositor->viewport.translate[1] = 0; compositor->viewport.translate[2] = 0; compositor->viewport.translate[3] = 0; compositor->scissor.maxx = compositor->fb_state.width; compositor->scissor.maxy = compositor->fb_state.height; compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state); compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport); compositor->pipe->set_scissor_state(compositor->pipe, &compositor->scissor); compositor->pipe->bind_sampler_states(compositor->pipe, 1, &compositor->sampler); compositor->pipe->set_sampler_textures(compositor->pipe, 1, &src_surface); compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader); compositor->pipe->bind_fs_state(compositor->pipe, compositor->fragment_shader); compositor->pipe->set_vertex_buffers(compositor->pipe, 2, compositor->vertex_bufs); compositor->pipe->set_vertex_elements(compositor->pipe, 2, compositor->vertex_elems); compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_VERTEX, 0, &compositor->vs_const_buf); compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, &compositor->fs_const_buf); vs_consts = pipe_buffer_map ( compositor->pipe->screen, compositor->vs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD ); vs_consts->dst_scale.x = dst_area->w / (float)compositor->fb_state.cbufs[0]->width; vs_consts->dst_scale.y = dst_area->h / (float)compositor->fb_state.cbufs[0]->height; vs_consts->dst_scale.z = 1; vs_consts->dst_scale.w = 1; vs_consts->dst_trans.x = dst_area->x / (float)compositor->fb_state.cbufs[0]->width; vs_consts->dst_trans.y = dst_area->y / (float)compositor->fb_state.cbufs[0]->height; vs_consts->dst_trans.z = 0; vs_consts->dst_trans.w = 0; vs_consts->src_scale.x = src_area->w / (float)src_surface->width[0]; vs_consts->src_scale.y = src_area->h / (float)src_surface->height[0]; vs_consts->src_scale.z = 1; vs_consts->src_scale.w = 1; vs_consts->src_trans.x = src_area->x / (float)src_surface->width[0]; vs_consts->src_trans.y = src_area->y / (float)src_surface->height[0]; vs_consts->src_trans.z = 0; vs_consts->src_trans.w = 0; pipe_buffer_unmap(compositor->pipe->screen, 
compositor->vs_const_buf.buffer); compositor->pipe->draw_arrays(compositor->pipe, PIPE_PRIM_TRIANGLE_STRIP, 0, 4); compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence); pipe_surface_reference(&compositor->fb_state.cbufs[0], NULL); }
static bool init_buffers(struct vl_compositor *c) { struct fragment_shader_consts fsc; assert(c); /* * Create our vertex buffer and vertex buffer element * VB contains 4 vertices that render a quad covering the entire window * to display a rendered surface * Quad is rendered as a tri strip */ c->vertex_bufs[0].stride = sizeof(struct vertex2f); c->vertex_bufs[0].max_index = 3; c->vertex_bufs[0].buffer_offset = 0; c->vertex_bufs[0].buffer = pipe_buffer_create ( c->pipe->screen, 1, PIPE_BUFFER_USAGE_VERTEX, sizeof(struct vertex2f) * 4 ); memcpy ( pipe_buffer_map(c->pipe->screen, c->vertex_bufs[0].buffer, PIPE_BUFFER_USAGE_CPU_WRITE), surface_verts, sizeof(struct vertex2f) * 4 ); pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[0].buffer); c->vertex_elems[0].src_offset = 0; c->vertex_elems[0].vertex_buffer_index = 0; c->vertex_elems[0].nr_components = 2; c->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT; /* * Create our texcoord buffer and texcoord buffer element * Texcoord buffer contains the TCs for mapping the rendered surface to the 4 vertices */ c->vertex_bufs[1].stride = sizeof(struct vertex2f); c->vertex_bufs[1].max_index = 3; c->vertex_bufs[1].buffer_offset = 0; c->vertex_bufs[1].buffer = pipe_buffer_create ( c->pipe->screen, 1, PIPE_BUFFER_USAGE_VERTEX, sizeof(struct vertex2f) * 4 ); memcpy ( pipe_buffer_map(c->pipe->screen, c->vertex_bufs[1].buffer, PIPE_BUFFER_USAGE_CPU_WRITE), surface_texcoords, sizeof(struct vertex2f) * 4 ); pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[1].buffer); c->vertex_elems[1].src_offset = 0; c->vertex_elems[1].vertex_buffer_index = 1; c->vertex_elems[1].nr_components = 2; c->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT; /* * Create our vertex shader's constant buffer * Const buffer contains scaling and translation vectors */ c->vs_const_buf.buffer = pipe_buffer_create ( c->pipe->screen, 1, PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD, sizeof(struct vertex_shader_consts) ); /* * Create our fragment shader's constant buffer * Const buffer contains the color conversion matrix and bias vectors */ c->fs_const_buf.buffer = pipe_buffer_create ( c->pipe->screen, 1, PIPE_BUFFER_USAGE_CONSTANT, sizeof(struct fragment_shader_consts) ); vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix); vl_compositor_set_csc_matrix(c, fsc.matrix); return true; }
pipe->set_viewport_state(pipe, &(struct pipe_viewport_state){ .scale = {width/2.0f, height/2.0f, 0.5f, 1.0f}, .translate = {width/2.0f, height/2.0f, 0.5f, 1.0f} }); pipe->set_fragment_sampler_views(pipe, 1, &sampler_view); pipe->set_vertex_buffers(pipe, 0, 1, &vertex_buffer_desc); pipe->set_index_buffer(pipe, NULL); void *vtx_shader = graw_parse_vertex_shader(pipe, particle_system_vert); void *frag_shader = graw_parse_fragment_shader(pipe, particle_system_frag); pipe->bind_vs_state(pipe, vtx_shader); pipe->bind_fs_state(pipe, frag_shader); /* Fill in particle data array */ struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); srand(0); for(int i = 0; i < NUM_PARTICLES; i++) { float *particleData = &vtx_logical[i * PARTICLE_SIZE]; // Lifetime of particle (*particleData++) = ( (float)(rand() % 10000) / 10000.0f ); // End position of particle (*particleData++) = ( (float)(rand() % 10000) / 5000.0f ) - 1.0f; (*particleData++) = ( (float)(rand() % 10000) / 5000.0f ) - 1.0f; (*particleData++) = ( (float)(rand() % 10000) / 5000.0f ) - 1.0f; // Start position of particle (*particleData++) = ( (float)(rand() % 10000) / 40000.0f ) - 0.125f;
int main(int argc, char **argv) { struct fbdemos_scaffold *fbs = 0; fbdemo_init(&fbs); int width = fbs->width; int height = fbs->height; struct pipe_context *pipe = fbs->pipe; /* Convert and upload embedded texture */ struct pipe_resource *tex_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_SAMPLER_VIEW, PIPE_FORMAT_B8G8R8X8_UNORM, COMPANION_TEXTURE_WIDTH, COMPANION_TEXTURE_HEIGHT, 0); void *temp = malloc(COMPANION_TEXTURE_WIDTH * COMPANION_TEXTURE_HEIGHT * 4); etna_convert_r8g8b8_to_b8g8r8x8(temp, (const uint8_t*)companion_texture, COMPANION_TEXTURE_WIDTH * COMPANION_TEXTURE_HEIGHT); etna_pipe_inline_write(pipe, tex_resource, 0, 0, temp, COMPANION_TEXTURE_WIDTH * COMPANION_TEXTURE_HEIGHT * 4); free(temp); /* resources */ struct pipe_resource *rt_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_B8G8R8X8_UNORM, width, height, 0); struct pipe_resource *z_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_Z16_UNORM, width, height, 0); /* bind render target to framebuffer */ etna_fb_bind_resource(&fbs->fb, rt_resource); /* geometry */ struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE); struct pipe_resource *idx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_IMMUTABLE, INDEX_BUFFER_SIZE); struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); assert(vtx_logical); struct pipe_transfer *idx_transfer = 0; float *idx_logical = pipe_buffer_map(pipe, idx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &idx_transfer); assert(idx_logical); #ifndef INDEXED printf("Interleaving vertices...\n"); float *vertices_array = companion_vertices_array(); float *texture_coordinates_array = companion_texture_coordinates_array(); float *normals_array = companion_normals_array(); assert(COMPANION_ARRAY_COUNT*(3+3+2)*sizeof(float) < VERTEX_BUFFER_SIZE); for(int vert=0; vert<COMPANION_ARRAY_COUNT; ++vert) { int dest_idx = vert * (3 + 3 + 2); for(int comp=0; comp<3; ++comp) ((float*)vtx_logical)[dest_idx+comp+0] = vertices_array[vert*3 + comp]; /* 0 */ for(int comp=0; comp<3; ++comp) ((float*)vtx_logical)[dest_idx+comp+3] = normals_array[vert*3 + comp]; /* 1 */ for(int comp=0; comp<2; ++comp) ((float*)vtx_logical)[dest_idx+comp+6] = texture_coordinates_array[vert*2 + comp]; /* 2 */ } #else printf("Interleaving vertices and copying index buffer...\n"); assert(COMPANION_VERTEX_COUNT*(3+3+2)*sizeof(float) < VERTEX_BUFFER_SIZE); for(int vert=0; vert<COMPANION_VERTEX_COUNT; ++vert) { int dest_idx = vert * (3 + 3 + 2); for(int comp=0; comp<3; ++comp) ((float*)vtx_logical)[dest_idx+comp+0] = companion_vertices[vert][comp]; /* 0 */ for(int comp=0; comp<3; ++comp) ((float*)vtx_logical)[dest_idx+comp+3] = companion_normals[vert][comp]; /* 1 */ for(int comp=0; comp<2; ++comp) ((float*)vtx_logical)[dest_idx+comp+6] = companion_texture_coordinates[vert][comp]; /* 2 */ } assert(COMPANION_TRIANGLE_COUNT*3*sizeof(unsigned short) < INDEX_BUFFER_SIZE); memcpy(idx_logical, &companion_triangles[0][0], COMPANION_TRIANGLE_COUNT*3*sizeof(unsigned short)); #endif pipe_buffer_unmap(pipe, vtx_transfer); pipe_buffer_unmap(pipe, idx_transfer); struct pipe_vertex_buffer vertex_buffer_desc = { .stride = (3 + 3 + 2)*4, .buffer_offset = 0, .buffer = vtx_resource, .user_buffer = 0 }; struct pipe_index_buffer index_buffer_desc = { .index_size = sizeof(unsigned 
short), .offset = 0, .buffer = idx_resource, .user_buffer = 0 }; struct pipe_vertex_element pipe_vertex_elements[] = { { /* positions */ .src_offset = 0, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32B32_FLOAT }, { /* normals */ .src_offset = 0xc, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32B32_FLOAT }, { /* texture coord */ .src_offset = 0x18, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32_FLOAT } };
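/* Sketch of the interleaved layout implied by the stride and src_offsets above
 * (the struct name is illustrative, not part of the demo): the stride is
 * (3 + 3 + 2) * 4 = 32 bytes per vertex, with the normal at 0xc = 12 and the
 * texture coordinate at 0x18 = 24. */
struct interleaved_vertex {
   float position[3];   /* src_offset 0x00 */
   float normal[3];     /* src_offset 0x0c */
   float texcoord[2];   /* src_offset 0x18 */
};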
/** * Called by VBO to draw arrays when in selection or feedback mode and * to implement glRasterPos. * This is very much like the normal draw_vbo() function above. * Look at code refactoring some day. * Might move this into the failover module some day. */ void st_feedback_draw_vbo(GLcontext *ctx, const struct gl_client_array **arrays, const struct _mesa_prim *prims, GLuint nr_prims, const struct _mesa_index_buffer *ib, GLboolean index_bounds_valid, GLuint min_index, GLuint max_index) { struct st_context *st = ctx->st; struct pipe_context *pipe = st->pipe; struct draw_context *draw = st->draw; const struct st_vertex_program *vp; const struct pipe_shader_state *vs; struct pipe_buffer *index_buffer_handle = 0; struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS]; struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS]; GLuint attr, i; ubyte *mapped_constants; assert(draw); st_validate_state(ctx->st); if (!index_bounds_valid) vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index); /* must get these after state validation! */ vp = ctx->st->vp; vs = &st->vp->state; if (!st->vp->draw_shader) { st->vp->draw_shader = draw_create_vertex_shader(draw, vs); } /* * Set up the draw module's state. * * We'd like to do this less frequently, but the normal state-update * code sends state updates to the pipe, not to our private draw module. */ assert(draw); draw_set_viewport_state(draw, &st->state.viewport); draw_set_clip_state(draw, &st->state.clip); draw_set_rasterizer_state(draw, &st->state.rasterizer); draw_bind_vertex_shader(draw, st->vp->draw_shader); set_feedback_vertex_format(ctx); /* loop over TGSI shader inputs to determine vertex buffer * and attribute info */ for (attr = 0; attr < vp->num_inputs; attr++) { const GLuint mesaAttr = vp->index_to_input[attr]; struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj; void *map; if (bufobj && bufobj->Name) { /* Attribute data is in a VBO. * Recall that for VBOs, the gl_client_array->Ptr field is * really an offset from the start of the VBO, not a pointer. 
*/ struct st_buffer_object *stobj = st_buffer_object(bufobj); assert(stobj->buffer); vbuffers[attr].buffer = NULL; pipe_buffer_reference(&vbuffers[attr].buffer, stobj->buffer); vbuffers[attr].buffer_offset = pointer_to_offset(arrays[0]->Ptr); velements[attr].src_offset = arrays[mesaAttr]->Ptr - arrays[0]->Ptr; } else { /* attribute data is in user-space memory, not a VBO */ uint bytes = (arrays[mesaAttr]->Size * _mesa_sizeof_type(arrays[mesaAttr]->Type) * (max_index + 1)); /* wrap user data */ vbuffers[attr].buffer = pipe_user_buffer_create(pipe->screen, (void *) arrays[mesaAttr]->Ptr, bytes); vbuffers[attr].buffer_offset = 0; velements[attr].src_offset = 0; } /* common-case setup */ vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */ vbuffers[attr].max_index = max_index; velements[attr].vertex_buffer_index = attr; velements[attr].nr_components = arrays[mesaAttr]->Size; velements[attr].src_format = st_pipe_vertex_format(arrays[mesaAttr]->Type, arrays[mesaAttr]->Size, arrays[mesaAttr]->Format, arrays[mesaAttr]->Normalized); assert(velements[attr].src_format); /* tell draw about this attribute */ #if 0 draw_set_vertex_buffer(draw, attr, &vbuffer[attr]); #endif /* map the attrib buffer */ map = pipe_buffer_map(pipe->screen, vbuffers[attr].buffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_vertex_buffer(draw, attr, map); } draw_set_vertex_buffers(draw, vp->num_inputs, vbuffers); draw_set_vertex_elements(draw, vp->num_inputs, velements); if (ib) { struct gl_buffer_object *bufobj = ib->obj; unsigned indexSize; void *map; switch (ib->type) { case GL_UNSIGNED_INT: indexSize = 4; break; case GL_UNSIGNED_SHORT: indexSize = 2; break; default: assert(0); return; } if (bufobj && bufobj->Name) { struct st_buffer_object *stobj = st_buffer_object(bufobj); index_buffer_handle = stobj->buffer; map = pipe_buffer_map(pipe->screen, index_buffer_handle, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_element_buffer(draw, indexSize, map); } else { draw_set_mapped_element_buffer(draw, indexSize, (void *) ib->ptr); } } else { /* no index/element buffer */ draw_set_mapped_element_buffer(draw, 0, NULL); } /* map constant buffers */ mapped_constants = pipe_buffer_map(pipe->screen, st->state.constants[PIPE_SHADER_VERTEX].buffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_constant_buffer(st->draw, mapped_constants, st->state.constants[PIPE_SHADER_VERTEX].buffer->size); /* draw here */ for (i = 0; i < nr_prims; i++) { draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count); } /* unmap constant buffers */ pipe_buffer_unmap(pipe->screen, st->state.constants[PIPE_SHADER_VERTEX].buffer); /* * unmap vertex/index buffers */ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { if (draw->pt.vertex_buffer[i].buffer) { pipe_buffer_unmap(pipe->screen, draw->pt.vertex_buffer[i].buffer); pipe_buffer_reference(&draw->pt.vertex_buffer[i].buffer, NULL); draw_set_mapped_vertex_buffer(draw, i, NULL); } } if (index_buffer_handle) { pipe_buffer_unmap(pipe->screen, index_buffer_handle); draw_set_mapped_element_buffer(draw, 0, NULL); } }
int main(int argc, char **argv) { int width = 1920; int height = 1080; bool do_clear = false; bool super_tiled = false; bool enable_ts = true; bool early_z = false; int num_frames = 2000; unsigned fmt_rt = PIPE_FORMAT_B8G8R8X8_UNORM; unsigned fmt_zs = PIPE_FORMAT_S8_UINT_Z24_UNORM; int opt; int error = 0; while ((opt = getopt(argc, argv, "w:h:l:s:t:e:f:d:c:")) != -1) { switch(opt) { case 'w': width = atoi(optarg); break; case 'h': height = atoi(optarg); break; case 'l': do_clear = atoi(optarg); break; case 's': super_tiled = atoi(optarg); break; case 't': enable_ts = atoi(optarg); break; case 'e': early_z = atoi(optarg); break; case 'f': num_frames = atoi(optarg); break; case 'd': switch(atoi(optarg)) { case 0: fmt_zs = PIPE_FORMAT_NONE; break; case 16: fmt_zs = PIPE_FORMAT_Z16_UNORM; break; case 32: fmt_zs = PIPE_FORMAT_S8_UINT_Z24_UNORM; break; default: printf("Invalid depth stencil surface depth %s\n", optarg); error = 1; } break; case 'c': switch(atoi(optarg)) { case 0: fmt_rt = PIPE_FORMAT_NONE; break; case 16: fmt_rt = PIPE_FORMAT_B5G6R5_UNORM; break; case 32: fmt_rt = PIPE_FORMAT_B8G8R8X8_UNORM; break; default: printf("Invalid color surface depth %s\n", optarg); error = 1; } break; default: printf("Unknown argument %c\n", opt); error = 1; } } if(error) { printf("Usage:\n"); printf(" %s [-w <width>] [-h <height>] [-l <0/1>] [-s <0/1>] [-t <0/1>] [-e <0/1>] [-f <frames>] [-d <0/16/32>] [-c <16/32>]\n", argv[0]); printf("\n"); printf(" -w <width> Width of surface (default is 1920)\n"); printf(" -h <height> Height of surface (default is 1080)\n"); printf(" -l <0/1> Clear surface every frame (0=no, 1=yes, default is 0)\n"); printf(" -s <0/1> Use supertile layout (0=no, 1=yes, default is 0)\n"); printf(" -t <0/1> Enable TS (0=no, 1=yes, default is 1)\n"); printf(" -e <0/1> Enable early Z (0=no, 1=yes, default is 0)\n"); printf(" -f <frames> Number of frames to render (default is 2000)\n"); printf(" -d <0/16/32> Depth/stencil surface depth\n"); printf(" -c <16/32> Color surface depth\n"); exit(1); } struct fbdemos_scaffold *fbs = 0; fbdemo_init(&fbs); struct pipe_context *pipe = fbs->pipe; /* resources */ struct pipe_resource *rt_resource = NULL; struct pipe_resource *z_resource = NULL; if(!super_tiled) etna_mesa_debug |= ETNA_DBG_NO_SUPERTILE; if(!enable_ts) etna_mesa_debug |= ETNA_DBG_NO_TS; if(!early_z) etna_mesa_debug |= ETNA_DBG_NO_EARLY_Z; if(fmt_rt != PIPE_FORMAT_NONE) rt_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, fmt_rt, width, height, 0); if(fmt_zs != PIPE_FORMAT_NONE) z_resource = fbdemo_create_2d(fbs->screen, PIPE_BIND_RENDER_TARGET, fmt_zs, width, height, 0); struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(vVertices)); /* vertex / index buffer setup */ struct pipe_transfer *vtx_transfer = 0; float *vtx_logical = pipe_buffer_map(pipe, vtx_resource, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &vtx_transfer); assert(vtx_logical); memcpy(vtx_logical, vVertices, sizeof(vVertices)); pipe_buffer_unmap(pipe, vtx_transfer); struct pipe_vertex_buffer vertex_buf_desc = { .stride = VERTEX_STRIDE*4, .buffer_offset = 0, .buffer = vtx_resource, .user_buffer = 0 }; struct pipe_vertex_element pipe_vertex_elements[] = { { /* positions */ .src_offset = 0*4, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32B32A32_FLOAT }, { /* texcoord */ .src_offset = 4*4, .instance_divisor = 0, .vertex_buffer_index = 0, .src_format = PIPE_FORMAT_R32G32B32A32_FLOAT }, };
enum pipe_error svga_swtnl_draw_range_elements(struct svga_context *svga, struct pipe_buffer *indexBuffer, unsigned indexSize, unsigned min_index, unsigned max_index, unsigned prim, unsigned start, unsigned count) { struct draw_context *draw = svga->swtnl.draw; unsigned i; const void *map; enum pipe_error ret; assert(!svga->dirty); assert(svga->state.sw.need_swtnl); assert(draw); ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW); if (ret) { svga_context_flush(svga, NULL); ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW); svga->swtnl.new_vbuf = TRUE; assert(ret == PIPE_OK); } /* * Map vertex buffers */ for (i = 0; i < svga->curr.num_vertex_buffers; i++) { map = pipe_buffer_map(svga->pipe.screen, svga->curr.vb[i].buffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_vertex_buffer(draw, i, map); } /* Map index buffer, if present */ if (indexBuffer) { map = pipe_buffer_map(svga->pipe.screen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_element_buffer_range(draw, indexSize, min_index, max_index, map); } if (svga->curr.cb[PIPE_SHADER_VERTEX]) { map = pipe_buffer_map(svga->pipe.screen, svga->curr.cb[PIPE_SHADER_VERTEX], PIPE_BUFFER_USAGE_CPU_READ); assert(map); draw_set_mapped_constant_buffer( draw, PIPE_SHADER_VERTEX, 0, map, svga->curr.cb[PIPE_SHADER_VERTEX]->size); } draw_arrays(svga->swtnl.draw, prim, start, count); draw_flush(svga->swtnl.draw); /* Ensure the draw module didn't touch this */ assert(i == svga->curr.num_vertex_buffers); /* * unmap vertex/index buffers */ for (i = 0; i < svga->curr.num_vertex_buffers; i++) { pipe_buffer_unmap(svga->pipe.screen, svga->curr.vb[i].buffer); draw_set_mapped_vertex_buffer(draw, i, NULL); } if (indexBuffer) { pipe_buffer_unmap(svga->pipe.screen, indexBuffer); draw_set_mapped_element_buffer(draw, 0, NULL); } if (svga->curr.cb[PIPE_SHADER_VERTEX]) { pipe_buffer_unmap(svga->pipe.screen, svga->curr.cb[PIPE_SHADER_VERTEX]); } return ret; }
static boolean nv30_vertprog_validate(struct nv30_context *nv30) { struct pipe_screen *pscreen = nv30->pipe.screen; struct nouveau_grobj *rankine = nv30->screen->rankine; struct nv30_vertex_program *vp; struct pipe_buffer *constbuf; boolean upload_code = FALSE, upload_data = FALSE; int i; vp = nv30->vertprog; constbuf = nv30->constbuf[PIPE_SHADER_VERTEX]; /* Translate TGSI shader into hw bytecode */ if (!vp->translated) { nv30_vertprog_translate(nv30, vp); if (!vp->translated) return FALSE; } /* Allocate hw vtxprog exec slots */ if (!vp->exec) { struct nouveau_resource *heap = nv30->screen->vp_exec_heap; struct nouveau_stateobj *so; uint vplen = vp->nr_insns; if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) { while (heap->next && heap->size < vplen) { struct nv30_vertex_program *evict; evict = heap->next->priv; nouveau_resource_free(&evict->exec); } if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) assert(0); } so = so_new(2, 0); so_method(so, rankine, NV34TCL_VP_START_FROM_ID, 1); so_data (so, vp->exec->start); so_ref(so, &vp->so); so_ref(NULL, &so); upload_code = TRUE; } /* Allocate hw vtxprog const slots */ if (vp->nr_consts && !vp->data) { struct nouveau_resource *heap = nv30->screen->vp_data_heap; if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) { while (heap->next && heap->size < vp->nr_consts) { struct nv30_vertex_program *evict; evict = heap->next->priv; nouveau_resource_free(&evict->data); } if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) assert(0); } /*XXX: handle this some day */ assert(vp->data->start >= vp->data_start_min); upload_data = TRUE; if (vp->data_start != vp->data->start) upload_code = TRUE; } /* If exec or data segments moved we need to patch the program to * fixup offsets and register IDs. */ if (vp->exec_start != vp->exec->start) { for (i = 0; i < vp->nr_insns; i++) { struct nv30_vertex_program_exec *vpi = &vp->insns[i]; if (vpi->has_branch_offset) { assert(0); } } vp->exec_start = vp->exec->start; } if (vp->nr_consts && vp->data_start != vp->data->start) { for (i = 0; i < vp->nr_insns; i++) { struct nv30_vertex_program_exec *vpi = &vp->insns[i]; if (vpi->const_index >= 0) { vpi->data[1] &= ~NV30_VP_INST_CONST_SRC_MASK; vpi->data[1] |= (vpi->const_index + vp->data->start) << NV30_VP_INST_CONST_SRC_SHIFT; } } vp->data_start = vp->data->start; } /* Update + Upload constant values */ if (vp->nr_consts) { float *map = NULL; if (constbuf) { map = pipe_buffer_map(pscreen, constbuf, PIPE_BUFFER_USAGE_CPU_READ); } for (i = 0; i < vp->nr_consts; i++) { struct nv30_vertex_program_data *vpd = &vp->consts[i]; if (vpd->index >= 0) { if (!upload_data && !memcmp(vpd->value, &map[vpd->index * 4], 4 * sizeof(float))) continue; memcpy(vpd->value, &map[vpd->index * 4], 4 * sizeof(float)); } BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_CONST_ID, 5); OUT_RING (i + vp->data->start); OUT_RINGp ((uint32_t *)vpd->value, 4); } if (constbuf) pipe_buffer_unmap(pscreen, constbuf); } /* Upload vtxprog */ if (upload_code) { #if 0 for (i = 0; i < vp->nr_insns; i++) { NOUVEAU_MSG("VP inst %d: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]); } #endif BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_FROM_ID, 1); OUT_RING (vp->exec->start); for (i = 0; i < vp->nr_insns; i++) { BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_INST(0), 4); OUT_RINGp (vp->insns[i].data, 4); } } if (vp->so != nv30->state.hw[NV30_STATE_VERTPROG]) { so_ref(vp->so, &nv30->state.hw[NV30_STATE_VERTPROG]); return TRUE; } return FALSE; }
/**
 * Begin a translate fallback: build a translate key covering the vertex
 * elements the driver cannot fetch natively, run translate over the
 * [min_index, max_index] range into an uploaded buffer, and bind fallback
 * vertex elements that point at it.
 */
static void
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Get a new vertex buffer slot. */
   mgr->fallback_vb_slot = u_vbuf_get_free_real_vb_slot(mgr);

   if (mgr->fallback_vb_slot == ~0) {
      return; /* XXX error, not enough attribs */
   }

   /* Initialize the description of how vertices should be translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) {
         continue;
      }

      /* Workaround for translate: output floats instead of halfs. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         uint8_t *map = pipe_buffer_map(mgr->pipe, vb->buffer,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        map + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, NULL);
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer = out_buffer;
   out_buffer = NULL;

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      if (tr_elem_index[i] < key.nr_elements) {
         te = &key.element[tr_elem_index[i]];
         mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
         mgr->fallback_velems[i].src_format = te->output_format;
         mgr->fallback_velems[i].src_offset = te->output_offset;
         mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot;
      } else {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
}
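/*
 * Sketch of the half-float workaround above pulled out as a standalone
 * helper (hypothetical name): translate modules of this vintage cannot emit
 * R16*_FLOAT outputs, so those formats are widened to their R32*_FLOAT
 * equivalents and the element size is adjusted to match.
 */
static enum pipe_format
sketch_widen_half_float(enum pipe_format format, unsigned *size)
{
   switch (format) {
   case PIPE_FORMAT_R16_FLOAT:          *size = 4;  return PIPE_FORMAT_R32_FLOAT;
   case PIPE_FORMAT_R16G16_FLOAT:       *size = 8;  return PIPE_FORMAT_R32G32_FLOAT;
   case PIPE_FORMAT_R16G16B16_FLOAT:    *size = 12; return PIPE_FORMAT_R32G32B32_FLOAT;
   case PIPE_FORMAT_R16G16B16A16_FLOAT: *size = 16; return PIPE_FORMAT_R32G32B32A32_FLOAT;
   default:                             return format; /* size left unchanged */
   }
}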
/**
 * Emit all the constants in a constant buffer for a shader stage.
 * On VGPU10, emit_consts_vgpu10 is used instead.
 */
static enum pipe_error
emit_consts_vgpu9(struct svga_context *svga, unsigned shader)
{
   const struct pipe_constant_buffer *cbuf;
   struct svga_screen *ss = svga_screen(svga->pipe.screen);
   struct pipe_transfer *transfer = NULL;
   unsigned count;
   const float (*data)[4] = NULL;
   unsigned i;
   enum pipe_error ret = PIPE_OK;
   const unsigned offset = 0;

   assert(shader < PIPE_SHADER_TYPES);
   assert(!svga_have_vgpu10(svga));
   /* Only one constant buffer per shader is supported before VGPU10.
    * This is only an approximate check against that.
    */
   assert(svga->curr.constbufs[shader][1].buffer == NULL);

   cbuf = &svga->curr.constbufs[shader][0];

   if (svga->curr.constbufs[shader][0].buffer) {
      /* emit user-provided constants */
      data = (const float (*)[4])
         pipe_buffer_map(&svga->pipe,
                         svga->curr.constbufs[shader][0].buffer,
                         PIPE_TRANSFER_READ, &transfer);
      if (!data) {
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      /* sanity check */
      assert(cbuf->buffer->width0 >= cbuf->buffer_size);

      /* Use/apply the constant buffer size and offsets here */
      count = cbuf->buffer_size / (4 * sizeof(float));
      data += cbuf->buffer_offset / (4 * sizeof(float));

      if (ss->hw_version >= SVGA3D_HWVERSION_WS8_B1) {
         ret = emit_const_range( svga, shader, offset, count, data );
      }
      else {
         for (i = 0; i < count; i++) {
            ret = emit_const( svga, shader, offset + i, data[i] );
            if (ret != PIPE_OK) {
               break;
            }
         }
      }

      pipe_buffer_unmap(&svga->pipe, transfer);

      if (ret != PIPE_OK) {
         return ret;
      }
   }

   /* emit extra shader constants */
   {
      const struct svga_shader_variant *variant = NULL;
      unsigned offset;
      float extras[MAX_EXTRA_CONSTS][4];
      unsigned count, i;

      switch (shader) {
      case PIPE_SHADER_VERTEX:
         variant = svga->state.hw_draw.vs;
         count = svga_get_extra_vs_constants(svga, (float *) extras);
         break;
      case PIPE_SHADER_FRAGMENT:
         variant = svga->state.hw_draw.fs;
         count = svga_get_extra_fs_constants(svga, (float *) extras);
         break;
      default:
         assert(!"Unexpected shader type");
         count = 0;
      }

      assert(variant);
      offset = variant->shader->info.file_max[TGSI_FILE_CONSTANT] + 1;
      assert(count <= ARRAY_SIZE(extras));

      if (count > 0) {
         if (ss->hw_version >= SVGA3D_HWVERSION_WS8_B1) {
            ret = emit_const_range(svga, shader, offset, count,
                                   (const float (*) [4]) extras);
         }
         else {
            for (i = 0; i < count; i++) {
               ret = emit_const(svga, shader, offset + i, extras[i]);
               if (ret != PIPE_OK)
                  return ret;
            }
         }
      }
   }

   return ret;
}
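/*
 * Illustrative sketch (hypothetical helpers, not part of the driver) of the
 * vec4 addressing math used above: constants are emitted as vec4s, so both
 * the count and the starting element come from dividing byte quantities by
 * 4 * sizeof(float).
 */
static unsigned
sketch_cbuf_vec4_count(unsigned buffer_size_bytes)
{
   return buffer_size_bytes / (4 * sizeof(float));   /* e.g. 256 bytes -> 16 vec4s */
}

static unsigned
sketch_cbuf_first_vec4(unsigned buffer_offset_bytes)
{
   return buffer_offset_bytes / (4 * sizeof(float)); /* e.g. offset 32 -> element 2 */
}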
/**
 * Software-TNL entry point for draw_vbo(): map the bound vertex, index and
 * vertex-shader constant buffers, feed them to the draw module, then unmap
 * everything and flag the pipeline state dirty.
 */
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
                    const struct pipe_draw_info *info)
{
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
   struct pipe_transfer *ib_transfer = NULL;
   struct pipe_transfer *cb_transfer = NULL;
   struct draw_context *draw = svga->swtnl.draw;
   unsigned i;
   const void *map;
   enum pipe_error ret;

   assert(!svga->dirty);
   assert(svga->state.sw.need_swtnl);
   assert(draw);

   /* Make sure that the need_swtnl flag does not go away */
   svga->state.sw.in_swtnl_draw = TRUE;

   ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   /*
    * Map vertex buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      if (svga->curr.vb[i].buffer) {
         map = pipe_buffer_map(&svga->pipe,
                               svga->curr.vb[i].buffer,
                               PIPE_TRANSFER_READ,
                               &vb_transfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, map);
      }
   }

   /* Map index buffer, if present */
   map = NULL;
   if (info->indexed && svga->curr.ib.buffer) {
      map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer,
                            PIPE_TRANSFER_READ, &ib_transfer);
      draw_set_indexes(draw,
                       (const ubyte *) map + svga->curr.ib.offset,
                       svga->curr.ib.index_size);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      map = pipe_buffer_map(&svga->pipe,
                            svga->curr.cb[PIPE_SHADER_VERTEX],
                            PIPE_TRANSFER_READ,
                            &cb_transfer);
      assert(map);
      draw_set_mapped_constant_buffer(
         draw, PIPE_SHADER_VERTEX, 0,
         map,
         svga->curr.cb[PIPE_SHADER_VERTEX]->width0);
   }

   draw_vbo(draw, info);

   draw_flush(svga->swtnl.draw);

   /* Ensure the draw module didn't touch this */
   assert(i == svga->curr.num_vertex_buffers);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      if (svga->curr.vb[i].buffer) {
         pipe_buffer_unmap(&svga->pipe, vb_transfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }

   if (ib_transfer) {
      pipe_buffer_unmap(&svga->pipe, ib_transfer);
      draw_set_indexes(draw, NULL, 0);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      pipe_buffer_unmap(&svga->pipe, cb_transfer);
   }

   /* Now safe to remove the need_swtnl flag in any update_state call */
   svga->state.sw.in_swtnl_draw = FALSE;

   svga->dirty |= SVGA_NEW_NEED_PIPELINE | SVGA_NEW_NEED_SWVFETCH;

   return ret;
}
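/*
 * The draw_vbo path above uses the transfer-based mapping API (the older
 * range-elements function earlier in this section still maps through the
 * screen).  A minimal sketch of the transfer pattern, with a hypothetical
 * helper name; the caller unmaps with the returned transfer object.
 */
static const void *
sketch_map_for_read(struct pipe_context *pipe, struct pipe_resource *buf,
                    struct pipe_transfer **transfer)
{
   const void *map = pipe_buffer_map(pipe, buf, PIPE_TRANSFER_READ, transfer);
   /* ... use the data, then: pipe_buffer_unmap(pipe, *transfer); */
   return map;
}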
/**
 * Software path for indexed drawing on i915: map the vertex and index
 * buffers, point the draw module at them and at the vertex shader
 * constants, draw, then unmap.
 */
static boolean
i915_draw_range_elements(struct pipe_context *pipe,
                         struct pipe_buffer *indexBuffer,
                         unsigned indexSize,
                         unsigned min_index,
                         unsigned max_index,
                         unsigned prim, unsigned start, unsigned count)
{
   struct i915_context *i915 = i915_context(pipe);
   struct draw_context *draw = i915->draw;
   unsigned i;

   if (i915->dirty)
      i915_update_derived(i915);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < i915->num_vertex_buffers; i++) {
      void *buf = pipe_buffer_map(pipe->screen,
                                  i915->vertex_buffer[i].buffer,
                                  PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /*
    * Map index buffer, if present
    */
   if (indexBuffer) {
      void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer,
                                             PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_element_buffer_range(draw, indexSize,
                                           min_index, max_index,
                                           mapped_indexes);
   } else {
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }

   draw_set_mapped_constant_buffer(draw,
                                   i915->current.constants[PIPE_SHADER_VERTEX],
                                   (i915->current.num_user_constants[PIPE_SHADER_VERTEX] *
                                    4 * sizeof(float)));

   /*
    * Do the drawing
    */
   draw_arrays(i915->draw, prim, start, count);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < i915->num_vertex_buffers; i++) {
      pipe_buffer_unmap(pipe->screen, i915->vertex_buffer[i].buffer);
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }

   if (indexBuffer) {
      pipe_buffer_unmap(pipe->screen, indexBuffer);
      draw_set_mapped_element_buffer_range(draw, 0, start,
                                           start + count - 1, NULL);
   }

   return TRUE;
}
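/*
 * Sketch (hypothetical helper) of the constant-buffer size passed to
 * draw_set_mapped_constant_buffer() above: each user constant is one vec4,
 * so the byte count is the constant count times 4 floats.
 */
static unsigned
sketch_i915_const_bytes(unsigned num_user_constants)
{
   return num_user_constants * 4 * sizeof(float);   /* e.g. 32 constants -> 512 bytes */
}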