/**
 * Split VBO into smaller pieces, draw the pieces.
 *
 * Fills in a copy_context describing the whole draw (arrays, prims,
 * index buffer, draw callback and hardware limits), resets the vertex
 * cache, then hands off to the replay_* helpers which re-emit the
 * indexed primitives in limit-sized chunks.
 *
 * \param ctx       GL context.
 * \param arrays    Client vertex arrays to source vertices from.
 * \param prim      Array of primitives to draw.
 * \param nr_prims  Number of entries in \p prim.
 * \param ib        Index buffer; must be non-NULL (indexed draws only).
 * \param draw      Callback used to issue each split draw.
 * \param limits    Hardware limits that force the split.
 */
void vbo_split_copy( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     vbo_draw_func draw,
                     const struct split_limits *limits )
{
   struct copy_context copy;
   GLuint i;

   /* NOTE(review): the pasted source had "&copy" mangled into the HTML
    * entity "©"; the address-of expressions below are the restoration.
    */
   memset(&copy, 0, sizeof(copy));

   /* Require indexed primitives:
    */
   assert(ib);

   copy.ctx = ctx;
   copy.array = arrays;
   copy.prim = prim;
   copy.nr_prims = nr_prims;
   copy.ib = ib;
   copy.draw = draw;
   copy.limits = limits;

   /* Clear the vertex cache: ~0 marks every slot as holding no valid
    * input element.
    */
   for (i = 0; i < ELT_TABLE_SIZE; i++)
      copy.vert_cache[i].in = ~0;

   replay_init(&copy);
   replay_elts(&copy);
   replay_finish(&copy);
}
/**
 * Split VBO into smaller pieces, draw the pieces.
 *
 * Groups consecutive primitives that share a basevertex, and issues one
 * copy/replay pass per group: the copy_context is filled in for the
 * group, the vertex cache is reset, and the replay_* helpers re-emit
 * the indexed primitives in limit-sized chunks.
 *
 * \param ctx       GL context.
 * \param arrays    Client vertex arrays to source vertices from.
 * \param prim      Array of primitives to draw.
 * \param nr_prims  Number of entries in \p prim.
 * \param ib        Index buffer; must be non-NULL (indexed draws only).
 * \param draw      Callback used to issue each split draw.
 * \param limits    Hardware limits that force the split.
 */
void vbo_split_copy( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     vbo_draw_func draw,
                     const struct split_limits *limits )
{
   struct copy_context copy;
   GLuint i, j, this_nr_prims;

   /* Advance by this_nr_prims each pass; the original pasted version
    * never advanced i (and clobbered it in the cache-clear loop below),
    * so the outer loop could not make correct progress.
    */
   for (i = 0; i < nr_prims; i += this_nr_prims) {
      /* Our SW TNL pipeline doesn't handle basevertex yet, so bind_indices
       * will rebase the elements to the basevertex, and we'll only
       * emit strings of prims with the same basevertex in one draw call.
       */
      for (this_nr_prims = 1; i + this_nr_prims < nr_prims;
           this_nr_prims++) {
         if (prim[i].basevertex != prim[i + this_nr_prims].basevertex)
            break;
      }

      memset(&copy, 0, sizeof(copy));

      /* Require indexed primitives:
       */
      assert(ib);

      copy.ctx = ctx;
      copy.array = arrays;
      copy.prim = &prim[i];
      copy.nr_prims = this_nr_prims;
      copy.ib = ib;
      copy.draw = draw;
      copy.limits = limits;

      /* Clear the vertex cache: use a separate index j so the outer
       * primitive index i is not clobbered.  ~0 marks every slot as
       * holding no valid input element.
       */
      for (j = 0; j < ELT_TABLE_SIZE; j++)
         copy.vert_cache[j].in = ~0;

      replay_init(&copy);
      replay_elts(&copy);
      replay_finish(&copy);
   }
}