/* Distribute flat-shaded attributes from the provoking vertex to another
 * vertex prior to clipping, so that newly generated vertices carry the
 * provoking vertex's values for flat-interpolated slots.
 */
void brw_clip_copy_flatshaded_attributes( struct brw_clip_compile *c,
                                          GLuint to, GLuint from )
{
   struct brw_codegen *p = &c->func;
   int slot;

   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      /* Only flat-shaded slots are copied; everything else is
       * interpolated later.
       */
      if (c->key.interpolation_mode.mode[slot] != INTERP_QUALIFIER_FLAT)
         continue;

      const GLuint offset = brw_vue_slot_to_offset(slot);

      brw_MOV(p,
              byte_offset(c->reg.vertex[to], offset),
              byte_offset(c->reg.vertex[from], offset));
   }
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * Beware that dest_ptr can be equal to v0_ptr!
 *
 * t0 is the clip-space interpolation parameter; when the clip key has
 * noperspective-shaded attributes, a screen-space parameter (t_nopersp)
 * is also derived below and used for those slots.
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             bool force_edgeflag)
{
   struct brw_codegen *p = &c->func;
   struct brw_reg t_nopersp, v0_ndc_copy;
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After CLIP stage, only first 256 bits of the VUE are read
    * back on Ironlake, so needn't change it
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* First handle the 3D and NDC interpolation, in case we
    * need noperspective interpolation. Doing it early has no
    * performance impact in any case.
    */

   /* Take a copy of the v0 NDC coordinates, in case dest == v0. */
   if (c->has_noperspective_shading) {
      GLuint offset = brw_varying_to_offset(&c->vue_map,
                                            BRW_VARYING_SLOT_NDC);
      v0_ndc_copy = get_tmp(c);
      brw_MOV(p, v0_ndc_copy, deref_4f(v0_ptr, offset));
   }

   /* Compute the new 3D position
    *
    * dest_hpos = v0_hpos * (1 - t0) + v1_hpos * t0
    */
   {
      GLuint delta = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
      struct brw_reg tmp = get_tmp(c);

      /* The MUL into the null register primes the accumulator; the
       * following MAC accumulates into it, so tmp ends up holding
       * t0 * (v1_hpos - v0_hpos).
       */
      brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
      brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
      brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);

      release_tmp(c, tmp);
   }

   /* Recreate the projected (NDC) coordinate in the new vertex header */
   brw_clip_project_vertex(c, dest_ptr);

   /* If we have noperspective attributes,
    * we need to compute the screen-space t
    */
   if (c->has_noperspective_shading) {
      GLuint delta = brw_varying_to_offset(&c->vue_map,
                                           BRW_VARYING_SLOT_NDC);
      struct brw_reg tmp = get_tmp(c);
      t_nopersp = get_tmp(c);

      /* t_nopersp = vec4(v1.xy, dest.xy) */
      brw_MOV(p, t_nopersp, deref_4f(v1_ptr, delta));
      brw_MOV(p, tmp, deref_4f(dest_ptr, delta));
      /* Swizzling requires ALIGN16 access mode; restored to ALIGN1 after
       * the vector math below.
       */
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p,
              brw_writemask(t_nopersp, WRITEMASK_ZW),
              brw_swizzle(tmp, 0, 1, 0, 1));

      /* t_nopersp = vec4(v1.xy, dest.xy) - v0.xyxy */
      brw_ADD(p, t_nopersp, t_nopersp,
              negate(brw_swizzle(v0_ndc_copy, 0, 1, 0, 1)));

      /* Add the absolute values of the X and Y deltas so that if
       * the points aren't in the same place on the screen we get
       * nonzero values to divide.
       *
       * After that, we have vert1 - vert0 in t_nopersp.x and
       * vertnew - vert0 in t_nopersp.y
       *
       * t_nopersp = vec2(|v1.x -v0.x| + |v1.y -v0.y|,
       *                  |dest.x-v0.x| + |dest.y-v0.y|)
       */
      brw_ADD(p, brw_writemask(t_nopersp, WRITEMASK_XY),
              brw_abs(brw_swizzle(t_nopersp, 0, 2, 0, 0)),
              brw_abs(brw_swizzle(t_nopersp, 1, 3, 0, 0)));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* If the points are in the same place, just substitute a
       * value to avoid divide-by-zero
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
              vec1(t_nopersp),
              brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      brw_MOV(p, t_nopersp, brw_imm_vf4(brw_float_to_vf(1.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0)));
      brw_ENDIF(p);

      /* Now compute t_nopersp = t_nopersp.y/t_nopersp.x and broadcast it. */
      brw_math_invert(p, get_element(t_nopersp, 0), get_element(t_nopersp, 0));
      brw_MUL(p, vec1(t_nopersp), vec1(t_nopersp),
              vec1(suboffset(t_nopersp, 1)));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p, t_nopersp, brw_swizzle(t_nopersp, 0, 0, 0, 0));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* NOTE(review): release order matters if get_tmp/release_tmp are
       * stack-like; tmp was allocated after v0_ndc_copy, so it is
       * released first here.
       */
      release_tmp(c, tmp);
      release_tmp(c, v0_ndc_copy);
   }

   /* Now we can iterate over each attribute
    * (could be done in pairs?)
    */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      /* HPOS, NDC already handled above */
      if (varying == VARYING_SLOT_POS || varying == BRW_VARYING_SLOT_NDC)
         continue;

      if (varying == VARYING_SLOT_EDGE) {
         /* Edge flags are either forced (unfilled-polygon paths) or
          * inherited from the "from" vertex; never interpolated.
          */
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ) {
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.
          */
      } else if (varying < VARYING_SLOT_MAX) {
         /* This is a true vertex result (and not a special value for the VUE
          * header), so interpolate:
          *
          *        New = attr0 + t*attr1 - t*attr0
          *
          * Unless the attribute is flat shaded -- in which case just copy
          * from one of the sources (doesn't matter which; already copied
          * from pv)
          */
         GLuint interp = c->key.interpolation_mode.mode[slot];

         if (interp != INTERP_QUALIFIER_FLAT) {
            struct brw_reg tmp = get_tmp(c);
            /* Noperspective attributes use the screen-space parameter
             * computed above; everything else uses the clip-space t0.
             */
            struct brw_reg t =
               interp == INTERP_QUALIFIER_NOPERSPECTIVE ? t_nopersp : t0;

            /* Same accumulator-based MUL/MAC lerp as for HPOS above. */
            brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t);
            brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t);
            brw_ADD(p, deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta), tmp);

            release_tmp(c, tmp);
         }
         else {
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
         }
      }
   }

   /* The VUE has an odd number of slots, so the last register is only
    * half used; zero the unused second half.
    */
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   if (c->has_noperspective_shading)
      release_tmp(c, t_nopersp);
}
/* Set up interpolation modes for every element in the VUE */ static void brw_setup_vue_interpolation(struct brw_context *brw) { const struct gl_fragment_program *fprog = brw->fragment_program; struct brw_vue_map *vue_map = &brw->vue_map_geom_out; memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode)); brw->ctx.NewDriverState |= BRW_NEW_INTERPOLATION_MAP; if (!fprog) return; for (int i = 0; i < vue_map->num_slots; i++) { int varying = vue_map->slot_to_varying[i]; if (varying == -1) continue; /* HPOS always wants noperspective. setting it up here allows * us to not need special handling in the SF program. */ if (varying == VARYING_SLOT_POS) { brw->interpolation_mode.mode[i] = INTERP_QUALIFIER_NOPERSPECTIVE; continue; } int frag_attrib = varying; if (varying == VARYING_SLOT_BFC0 || varying == VARYING_SLOT_BFC1) frag_attrib = varying - VARYING_SLOT_BFC0 + VARYING_SLOT_COL0; if (!(fprog->Base.InputsRead & BITFIELD64_BIT(frag_attrib))) continue; enum glsl_interp_qualifier mode = fprog->InterpQualifier[frag_attrib]; /* If the mode is not specified, the default varies: Color values * follow GL_SHADE_MODEL; everything else is smooth. */ if (mode == INTERP_QUALIFIER_NONE) { if (frag_attrib == VARYING_SLOT_COL0 || frag_attrib == VARYING_SLOT_COL1) mode = brw->ctx.Light.ShadeModel == GL_FLAT ? INTERP_QUALIFIER_FLAT : INTERP_QUALIFIER_SMOOTH; else mode = INTERP_QUALIFIER_SMOOTH; } brw->interpolation_mode.mode[i] = mode; } if (unlikely(INTEL_DEBUG & DEBUG_VUE)) { fprintf(stderr, "VUE map:\n"); for (int i = 0; i < vue_map->num_slots; i++) { int varying = vue_map->slot_to_varying[i]; if (varying == -1) { fprintf(stderr, "%d: --\n", i); continue; } fprintf(stderr, "%d: %d %s ofs %d\n", i, varying, get_qual_name(brw->interpolation_mode.mode[i]), brw_vue_slot_to_offset(i)); } } }
/* Statically lay out the GRF register file for the clip program.
 * Register usage is fixed per-compile, so all assignments are
 * precomputed here; `i` tracks the next free register.
 */
void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
                              GLuint nr_verts )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i = 0,j;

   /* Register usage is static, precompute here:
    */
   /* r0: thread payload header, read as unsigned dwords. */
   c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;

   /* Curbe-loaded clip planes: 6 fixed planes plus the user clip planes,
    * packed two vec4 planes per register (hence the /2 with rounding up).
    */
   if (c->key.nr_userclip) {
      c->reg.fixed_planes = brw_vec4_grf(i, 0);
      i += (6 + c->key.nr_userclip + 1) / 2;

      c->prog_data.curb_read_length = (6 + c->key.nr_userclip + 1) / 2;
   }
   else
      c->prog_data.curb_read_length = 0;


   /* Payload vertices plus space for more generated vertices:
    */
   for (j = 0; j < nr_verts; j++) {
      c->reg.vertex[j] = brw_vec4_grf(i, 0);
      i += c->nr_regs;
   }

   if (c->vue_map.num_slots % 2) {
      /* The VUE has an odd number of slots so the last register is only half
       * used.  Fill the second half with zero.
       */
      for (j = 0; j < 3; j++) {
         GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

         brw_MOV(&c->func, byte_offset(c->reg.vertex[j], delta), brw_imm_f(0));
      }
   }

   /* Scalar working values packed into one register. */
   c->reg.t          = brw_vec1_grf(i, 0);
   c->reg.loopcount  = retype(brw_vec1_grf(i, 1), BRW_REGISTER_TYPE_D);
   c->reg.nr_verts   = retype(brw_vec1_grf(i, 2), BRW_REGISTER_TYPE_UD);
   c->reg.planemask  = retype(brw_vec1_grf(i, 3), BRW_REGISTER_TYPE_UD);
   c->reg.plane_equation = brw_vec4_grf(i, 4);
   i++;

   c->reg.dpPrev     = brw_vec1_grf(i, 0); /* fixme - dp4 will clobber r.1,2,3 */
   c->reg.dp         = brw_vec1_grf(i, 4);
   i++;

   /* Vertex index lists for the in/out/free lists, one register each. */
   c->reg.inlist     = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
   i++;

   c->reg.outlist    = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
   i++;

   c->reg.freelist   = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
   i++;

   /* Without user clip planes the fixed planes are built in a GRF
    * instead of being loaded from the curbe above.
    */
   if (!c->key.nr_userclip) {
      c->reg.fixed_planes = brw_vec8_grf(i, 0);
      i++;
   }

   /* Extra scratch registers for the unfilled (wireframe/point) path. */
   if (c->key.do_unfilled) {
      c->reg.dir     = brw_vec4_grf(i, 0);
      c->reg.offset  = brw_vec4_grf(i, 4);
      i++;
      c->reg.tmp0    = brw_vec4_grf(i, 0);
      c->reg.tmp1    = brw_vec4_grf(i, 4);
      i++;
   }

   if (intel->needs_ff_sync) {
      c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
      i++;
   }

   /* Everything past here is temporary space for get_tmp()/release_tmp(). */
   c->first_tmp = i;
   c->last_tmp = i;

   c->prog_data.urb_read_length = c->nr_regs; /* ? */
   c->prog_data.total_grf = i;
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             bool force_edgeflag)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   GLuint slot;

   /* Just copy the vertex header: after the CLIP stage only the first
    * 256 bits of the VUE are read back on Ironlake, so it needn't change.
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* Walk every attribute slot (could be done in pairs?) */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      const int varying = c->vue_map.slot_to_varying[slot];
      const GLuint delta = brw_vue_slot_to_offset(slot);

      switch (varying) {
      case VARYING_SLOT_EDGE:
         /* Edge flags are forced to 1 (unfilled paths) or copied from
          * the "from" vertex; never interpolated.
          */
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
         break;

      case VARYING_SLOT_PSIZ:
      case VARYING_SLOT_CLIP_DIST0:
      case VARYING_SLOT_CLIP_DIST1:
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.  CLIP_DIST0 and CLIP_DIST1 don't need
          * interpolation because on pre-GEN6, these are just placeholder
          * VUE slots that don't perform any action.
          */
         break;

      default:
         if (varying < VARYING_SLOT_MAX) {
            /* This is a true vertex result (and not a special value for
             * the VUE header), so interpolate:
             *
             *        New = attr0 + t*attr1 - t*attr0
             *
             * The MUL into the null register primes the accumulator for
             * the following MAC.
             */
            brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
            brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
            brw_ADD(p, deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta), tmp);
         }
         break;
      }
   }

   if (c->vue_map.num_slots % 2) {
      /* The VUE has an odd number of slots, so zero-fill the unused
       * second half of the final register.
       */
      const GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   release_tmp(c, tmp);

   /* Recreate the projected (NDC) coordinate in the new vertex header. */
   brw_clip_project_vertex(c, dest_ptr );
}