/**
 * Compute the VUE map for tessellation control shader outputs and
 * tessellation evaluation shader inputs.
 */
void
brw_compute_tess_vue_map(struct brw_vue_map *vue_map,
                         GLbitfield64 vertex_slots,
                         GLbitfield patch_slots)
{
   /* I don't think anything actually uses this... */
   vue_map->slots_valid = vertex_slots;

   /* The tessellation levels live in the patch header rather than in
    * ordinary varying slots, so drop them from the per-vertex set.
    */
   vertex_slots &= ~(VARYING_BIT_TESS_LEVEL_OUTER |
                     VARYING_BIT_TESS_LEVEL_INNER);

   /* varying_to_slot and slot_to_varying are stored in signed chars.  Since
    * slot_to_varying sometimes holds values equal to VARYING_SLOT_TESS_MAX,
    * that constant must be <= 127, not 128.
    */
   STATIC_ASSERT(VARYING_SLOT_TESS_MAX <= 127);

   for (int vary = 0; vary < VARYING_SLOT_TESS_MAX; vary++) {
      vue_map->varying_to_slot[vary] = -1;
      vue_map->slot_to_varying[vary] = BRW_VARYING_SLOT_PAD;
   }

   int slot = 0;

   /* The first 8 DWords are reserved for the "Patch Header".
    *
    * VARYING_SLOT_TESS_LEVEL_OUTER / INNER live here, but the exact layout
    * depends on the domain type.  They might not be in slots 0 and 1 as
    * described here, but pretending they're separate allows us to uniquely
    * identify them by distinct slot locations.
    */
   assign_vue_slot(vue_map, VARYING_SLOT_TESS_LEVEL_INNER, slot++);
   assign_vue_slot(vue_map, VARYING_SLOT_TESS_LEVEL_OUTER, slot++);

   /* first assign per-patch varyings */
   for (GLbitfield remaining = patch_slots; remaining != 0;) {
      const int varying = ffsll(remaining) - 1;
      remaining &= ~BITFIELD64_BIT(varying);
      if (vue_map->varying_to_slot[varying + VARYING_SLOT_PATCH0] == -1)
         assign_vue_slot(vue_map, varying + VARYING_SLOT_PATCH0, slot++);
   }

   /* apparently, including the patch header... */
   vue_map->num_per_patch_slots = slot;

   /* then assign per-vertex varyings for each vertex in our patch */
   for (GLbitfield64 remaining = vertex_slots; remaining != 0;) {
      const int varying = ffsll(remaining) - 1;
      remaining &= ~BITFIELD64_BIT(varying);
      if (vue_map->varying_to_slot[varying] == -1)
         assign_vue_slot(vue_map, varying, slot++);
   }

   vue_map->num_per_vertex_slots = slot - vue_map->num_per_patch_slots;
   vue_map->num_slots = slot;
}
/**
 * Compute the VUE map for vertex shader program.
 *
 * Note that consumers of this map using cache keys must include
 * prog_data->userclip and prog_data->outputs_written in their key
 * (generated by CACHE_NEW_VS_PROG).
 */
void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid, bool userclip_active)
{
   vue_map->slots_valid = slots_valid;

   /* varying_to_slot and slot_to_varying are stored in signed chars.  Since
    * slot_to_varying sometimes holds values equal to BRW_VARYING_SLOT_COUNT,
    * that constant must be <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   vue_map->num_slots = 0;
   for (int v = 0; v < BRW_VARYING_SLOT_COUNT; v++) {
      vue_map->varying_to_slot[v] = -1;
      vue_map->slot_to_varying[v] = BRW_VARYING_SLOT_COUNT;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   if (brw->gen == 4 || brw->gen == 5) {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       *
       * On Ironlake the VUE header is nominally 20 dwords, but the hardware
       * will accept the same header layout as Gen4 [and should be a bit
       * faster]
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
   } else if (brw->gen == 6 || brw->gen == 7) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      if (userclip_active) {
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);
      }

      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
   } else {
      assert(!"VUE map not known for this chip generation");
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously.  Don't reassign outputs that already have a
    * slot.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   for (int varying = 0; varying < VARYING_SLOT_MAX; varying++) {
      if (!(slots_valid & BITFIELD64_BIT(varying)))
         continue;
      if (vue_map->varying_to_slot[varying] == -1)
         assign_vue_slot(vue_map, varying);
   }
}
/**
 * Compute the VUE map for vertex shader program.
 *
 * Fills in c->prog_data.vue_map: a bidirectional mapping between
 * vert_results and VUE slots, including the generation-specific VUE header.
 *
 * Note that consumers of this map using cache keys must include
 * prog_data->userclip and prog_data->outputs_written in their key
 * (generated by CACHE_NEW_VS_PROG).
 */
static void brw_compute_vue_map(struct brw_vs_compile *c)
{
   struct brw_context *brw = c->func.brw;
   const struct intel_context *intel = &brw->intel;
   struct brw_vue_map *vue_map = &c->prog_data.vue_map;
   GLbitfield64 outputs_written = c->prog_data.outputs_written;
   int i;

   /* Start with every vert_result unassigned and every slot unoccupied. */
   vue_map->num_slots = 0;
   for (i = 0; i < BRW_VERT_RESULT_MAX; ++i) {
      vue_map->vert_result_to_slot[i] = -1;
      vue_map->slot_to_vert_result[i] = BRW_VERT_RESULT_MAX;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   switch (intel->gen) {
   case 4:
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       */
      assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
      assign_vue_slot(vue_map, BRW_VERT_RESULT_NDC);
      assign_vue_slot(vue_map, VERT_RESULT_HPOS);
      break;
   case 5:
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the ndc position
       * dword 8-11 of the vertex header is the 4D space position
       * dword 12-19 of the vertex header is the user clip distance.
       * dword 20-23 is a pad so that the vertex element data is aligned
       * dword 24-27 is the first vertex data we fill.
       *
       * Note: future pipeline stages expect 4D space position to be
       * contiguous with the other vert_results, so we make dword 24-27 a
       * duplicate copy of the 4D space position.
       */
      /* The order of these assignments is the hardware header layout —
       * do not reorder them.
       */
      assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
      assign_vue_slot(vue_map, BRW_VERT_RESULT_NDC);
      assign_vue_slot(vue_map, BRW_VERT_RESULT_HPOS_DUPLICATE);
      assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST0);
      assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST1);
      assign_vue_slot(vue_map, BRW_VERT_RESULT_PAD);
      assign_vue_slot(vue_map, VERT_RESULT_HPOS);
      break;
   case 6:
   case 7:
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
      assign_vue_slot(vue_map, VERT_RESULT_HPOS);
      if (c->key.userclip_active) {
         assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST0);
         assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST1);
      }
      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (outputs_written & BITFIELD64_BIT(VERT_RESULT_COL0))
         assign_vue_slot(vue_map, VERT_RESULT_COL0);
      if (outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC0))
         assign_vue_slot(vue_map, VERT_RESULT_BFC0);
      if (outputs_written & BITFIELD64_BIT(VERT_RESULT_COL1))
         assign_vue_slot(vue_map, VERT_RESULT_COL1);
      if (outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC1))
         assign_vue_slot(vue_map, VERT_RESULT_BFC1);
      break;
   default:
      assert (!"VUE map not known for this chip generation");
      break;
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously.  Don't reassign outputs that already have a
    * slot.
    *
    * Also, prior to Gen6, don't assign a slot for VERT_RESULT_CLIP_VERTEX,
    * since it is unsupported.  In Gen6 and above, VERT_RESULT_CLIP_VERTEX may
    * be needed for transform feedback; since we don't want to have to
    * recompute the VUE map (and everything that depends on it) when transform
    * feedback is enabled or disabled, just go ahead and assign a slot for it.
    */
   for (int i = 0; i < VERT_RESULT_MAX; ++i) {
      if (intel->gen < 6 && i == VERT_RESULT_CLIP_VERTEX)
         continue;
      if ((outputs_written & BITFIELD64_BIT(i)) &&
          vue_map->vert_result_to_slot[i] == -1) {
         assign_vue_slot(vue_map, i);
      }
   }
}
/**
 * Compute the VUE map for a shader stage.
 */
void
brw_compute_vue_map(const struct gen_device_info *devinfo,
                    struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid,
                    bool separate)
{
   /* Keep using the packed/contiguous layout on old hardware - we only need
    * the SSO layout when using geometry/tessellation shaders or 32 FS input
    * varyings, which only exist on Gen >= 6.  It's also a bit more
    * efficient.
    */
   if (devinfo->gen < 6)
      separate = false;

   vue_map->slots_valid = slots_valid;
   vue_map->separate = separate;

   /* gl_Layer and gl_ViewportIndex don't get their own varying slots -- they
    * are stored in the first VUE slot (VARYING_SLOT_PSIZ).
    */
   slots_valid &= ~(VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

   /* varying_to_slot and slot_to_varying are stored in signed chars.  Since
    * slot_to_varying sometimes holds values equal to BRW_VARYING_SLOT_COUNT,
    * that constant must be <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   for (int vary = 0; vary < BRW_VARYING_SLOT_COUNT; vary++) {
      vue_map->varying_to_slot[vary] = -1;
      vue_map->slot_to_varying[vary] = BRW_VARYING_SLOT_PAD;
   }

   int slot = 0;

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    *
    * See the Sandybridge PRM, Volume 2 Part 1, section 1.5.1 (page 30),
    * "Vertex URB Entry (VUE) Formats" which describes the VUE header layout.
    */
   if (devinfo->gen < 6) {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       *
       * On Ironlake the VUE header is nominally 20 dwords, but the hardware
       * will accept the same header layout as Gen4 [and should be a bit
       * faster]
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ, slot++);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC, slot++);
      assign_vue_slot(vue_map, VARYING_SLOT_POS, slot++);
   } else {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ, slot++);
      assign_vue_slot(vue_map, VARYING_SLOT_POS, slot++);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0, slot++);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1, slot++);

      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0, slot++);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0, slot++);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1, slot++);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1, slot++);
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so we
    * can assign them however we like.  For normal programs, we simply assign
    * them contiguously.
    *
    * For separate shader pipelines, we first assign built-in varyings
    * contiguous slots.  This works because ARB_separate_shader_objects
    * requires that all shaders have matching built-in varying interface
    * blocks.  Next, we assign generic varyings based on their location
    * (either explicit or linker assigned).  This guarantees a fixed layout.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   GLbitfield64 remaining = slots_valid & BITFIELD64_MASK(VARYING_SLOT_VAR0);
   while (remaining != 0) {
      const int varying = ffsll(remaining) - 1;
      remaining &= remaining - 1;    /* clear lowest set bit */
      if (vue_map->varying_to_slot[varying] == -1)
         assign_vue_slot(vue_map, varying, slot++);
   }

   const int first_generic_slot = slot;
   remaining = slots_valid & ~BITFIELD64_MASK(VARYING_SLOT_VAR0);
   while (remaining != 0) {
      const int varying = ffsll(remaining) - 1;
      remaining &= remaining - 1;    /* clear lowest set bit */
      if (separate) {
         /* Fixed layout: slot is determined purely by the location. */
         slot = first_generic_slot + varying - VARYING_SLOT_VAR0;
         assign_vue_slot(vue_map, varying, slot);
      } else {
         assign_vue_slot(vue_map, varying, slot++);
      }
   }

   vue_map->num_slots = separate ? slot + 1 : slot;
   vue_map->num_per_vertex_slots = 0;
   vue_map->num_per_patch_slots = 0;
}