/* Compute the polygon-offset amount for unfilled polygons, in GEN assembly.
 *
 * Equivalent C:
 *   GLfloat iz = 1.0 / dir.z;
 *   GLfloat ac = dir.x * iz;
 *   GLfloat bc = dir.y * iz;
 *   offset = ctx->Polygon.OffsetUnits * DEPTH_SCALE;
 *   offset += MAX2( abs(ac), abs(bc) ) * ctx->Polygon.OffsetFactor;
 *   offset *= MRD;
 */
static void compute_offset( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_reg off = c->reg.offset;
   struct brw_reg dir = c->reg.dir;

   /* off.z = 1 / dir.z; then off.xy = dir.xy * (1 / dir.z) */
   brw_math_invert(p, get_element(off, 2), get_element(dir, 2));
   brw_MUL(p, vec2(off), dir, get_element(off, 2));

   /* Flag |off.x| >= |off.y|, then SEL the larger magnitude into off.x
    * (this is the MAX2(abs(ac), abs(bc)) term).
    */
   brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_GE,
           brw_abs(get_element(off, 0)),
           brw_abs(get_element(off, 1)));
   brw_SEL(p, vec1(off),
           brw_abs(get_element(off, 0)),
           brw_abs(get_element(off, 1)));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);

   /* offset = max_slope * factor + units.
    * NOTE(review): offset_units appears to be pre-scaled by DEPTH_SCALE/MRD
    * when the key is set up — confirm against the state-upload code.
    */
   brw_MUL(p, vec1(off), off, brw_imm_f(c->key.offset_factor));
   brw_ADD(p, vec1(off), off, brw_imm_f(c->key.offset_units));
}
static void emit_mad(struct brw_wm_compile *c, struct prog_instruction *inst) { struct brw_compile *p = &c->func; GLuint mask = inst->DstReg.WriteMask; struct brw_reg dst, src0, src1, src2; int i; for (i = 0; i < 4; i++) { if (mask & (1<<i)) { dst = get_dst_reg(c, inst, i, 1); src0 = get_src_reg(c, &inst->SrcReg[0], i, 1); src1 = get_src_reg(c, &inst->SrcReg[1], i, 1); src2 = get_src_reg(c, &inst->SrcReg[2], i, 1); brw_MUL(p, dst, src0, src1); brw_set_saturate(p, (inst->SaturateMode != SATURATE_OFF) ? 1 : 0); brw_ADD(p, dst, dst, src2); brw_set_saturate(p, 0); } } }
/* Emit dst = lrp(arg0, arg1, arg2) = arg0*arg1 + (1 - arg0)*arg2,
 * per enabled channel.
 *
 * Uses dst as a temporary, and routes the (1 - arg0)*arg2 term through the
 * hardware accumulator: the MUL to the null register still updates the
 * accumulator, and MAC then adds arg0*arg1 to it.
 */
void emit_lrp(struct brw_compile *p,
              const struct brw_reg *dst,
              GLuint mask,
              const struct brw_reg *arg0,
              const struct brw_reg *arg1,
              const struct brw_reg *arg2)
{
   GLuint i;

   /* Uses dst as a temporary:
    */
   for (i = 0; i < 4; i++) {
      if (mask & (1<<i)) {
         /* Can I use the LINE instruction for this? */
         /* dst = 1 - arg0 */
         brw_ADD(p, dst[i], negate(arg0[i]), brw_imm_f(1.0));
         /* acc = (1 - arg0) * arg2; result discarded, accumulator kept */
         brw_MUL(p, brw_null_reg(), dst[i], arg2[i]);

         /* dst = arg0*arg1 + acc, with optional saturate on the result */
         brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
         brw_MAC(p, dst[i], arg0[i], arg1[i]);
         brw_set_saturate(p, 0);
      }
   }
}
/* Emit dst = lrp(src0, src1, src2) = src0*src1 + (1 - src0)*src2,
 * one channel at a time.
 *
 * dst is used as a temporary, so src1/src2 are copied to scratch registers
 * when they alias dst.  The (1 - src0)*src2 term is routed through the
 * accumulator (MUL to null reg), and MAC folds in src0*src1.
 *
 * NOTE(review): src0 aliasing dst is not guarded here — the ADD clobbers
 * dst before MAC re-reads src0; confirm callers never produce that aliasing.
 */
static void emit_lrp(struct brw_wm_compile *c,
                     struct prog_instruction *inst)
{
   struct brw_compile *p = &c->func;
   GLuint mask = inst->DstReg.WriteMask;
   struct brw_reg dst, tmp1, tmp2, src0, src1, src2;
   int i;

   for (i = 0; i < 4; i++) {
      if (mask & (1<<i)) {
         dst = get_dst_reg(c, inst, i, 1);
         src0 = get_src_reg(c, &inst->SrcReg[0], i, 1);

         src1 = get_src_reg(c, &inst->SrcReg[1], i, 1);
         /* Copy src1 aside if it lives in the dst register. */
         if (src1.nr == dst.nr) {
            tmp1 = alloc_tmp(c);
            brw_MOV(p, tmp1, src1);
         } else
            tmp1 = src1;

         src2 = get_src_reg(c, &inst->SrcReg[2], i, 1);
         /* Likewise for src2. */
         if (src2.nr == dst.nr) {
            tmp2 = alloc_tmp(c);
            brw_MOV(p, tmp2, src2);
         } else
            tmp2 = src2;

         /* dst = 1 - src0 */
         brw_ADD(p, dst, negate(src0), brw_imm_f(1.0));
         /* acc = (1 - src0) * src2 */
         brw_MUL(p, brw_null_reg(), dst, tmp2);
         /* dst = src0*src1 + acc (saturate only the final result) */
         brw_set_saturate(p, (inst->SaturateMode != SATURATE_OFF) ? 1 : 0);
         brw_MAC(p, dst, src0, tmp1);
         brw_set_saturate(p, 0);
      }
      release_tmps(c);
   }
}
void vec4_generator::generate_oword_dual_block_offsets(struct brw_reg m1, struct brw_reg index) { int second_vertex_offset; if (brw->gen >= 6) second_vertex_offset = 1; else second_vertex_offset = 16; m1 = retype(m1, BRW_REGISTER_TYPE_D); /* Set up M1 (message payload). Only the block offsets in M1.0 and * M1.4 are used, and the rest are ignored. */ struct brw_reg m1_0 = suboffset(vec1(m1), 0); struct brw_reg m1_4 = suboffset(vec1(m1), 4); struct brw_reg index_0 = suboffset(vec1(index), 0); struct brw_reg index_4 = suboffset(vec1(index), 4); brw_push_insn_state(p); brw_set_mask_control(p, BRW_MASK_DISABLE); brw_set_access_mode(p, BRW_ALIGN_1); brw_MOV(p, m1_0, index_0); if (index.file == BRW_IMMEDIATE_VALUE) { index_4.dw1.ud += second_vertex_offset; brw_MOV(p, m1_4, index_4); } else { brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset)); } brw_pop_insn_state(p); }
/**
 * Loads the clip distance for a vertex into `dst`, and ends with
 * a comparison of it to zero with the condition `cond`.
 *
 * - If using a fixed plane, the distance is dot(hpos, plane).
 * - If using a user clip plane, the distance is directly available in the
 *   vertex's gl_ClipDistance array.
 *
 * Which case applies is decided at runtime by bit 0 of
 * c->reg.vertex_src_mask (nonzero => user clip plane).
 */
static inline void
load_clip_distance(struct brw_clip_compile *c, struct brw_indirect vtx,
                struct brw_reg dst, GLuint hpos_offset, int cond)
{
   struct brw_codegen *p = &c->func;

   dst = vec4(dst);
   /* Test the low bit of vertex_src_mask and branch on it. */
   brw_AND(p, vec1(brw_null_reg()), c->reg.vertex_src_mask, brw_imm_ud(1));
   brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
   brw_IF(p, BRW_EXECUTE_1);
   {
      /* User clip plane: read the precomputed distance from the vertex at
       * clipdistance_offset (a7 is used as a scratch address register).
       */
      struct brw_indirect temp_ptr = brw_indirect(7, 0);
      brw_ADD(p, get_addr_reg(temp_ptr), get_addr_reg(vtx),
              c->reg.clipdistance_offset);
      brw_MOV(p, vec1(dst), deref_1f(temp_ptr, 0));
   }
   brw_ELSE(p);
   {
      /* Fixed plane: distance = dot(hpos, plane_equation). */
      brw_MOV(p, dst, deref_4f(vtx, hpos_offset));
      brw_DP4(p, dst, dst, c->reg.plane_equation);
   }
   brw_ENDIF(p);

   /* Leave the flag register holding (dst cond 0.0f) for the caller's IF. */
   brw_CMP(p, brw_null_reg(), cond, vec1(dst), brw_imm_f(0.0f));
}
/* Use mesa's clipping algorithms, translated to GEN4 assembly.
 *
 * Sutherland-Hodgman style polygon clipping: for each enabled clip plane,
 * walk the current vertex list (inlist), emitting kept/interpolated vertices
 * to outlist, then swap the lists and move to the next plane.
 */
void brw_clip_tri( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;
   /* Address registers a0..a6 used as pointers during the clip loop. */
   struct brw_indirect vtx = brw_indirect(0, 0);
   struct brw_indirect vtxPrev = brw_indirect(1, 0);
   struct brw_indirect vtxOut = brw_indirect(2, 0);
   struct brw_indirect plane_ptr = brw_indirect(3, 0);
   struct brw_indirect inlist_ptr = brw_indirect(4, 0);
   struct brw_indirect outlist_ptr = brw_indirect(5, 0);
   struct brw_indirect freelist_ptr = brw_indirect(6, 0);
   GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
   GLint clipdist0_offset = c->key.nr_userclip
      ? brw_varying_to_offset(&c->vue_map, VARYING_SLOT_CLIP_DIST0)
      : 0;

   brw_MOV(p, get_addr_reg(vtxPrev),     brw_address(c->reg.vertex[2]) );
   brw_MOV(p, get_addr_reg(plane_ptr),   brw_clip_plane0_address(c));
   brw_MOV(p, get_addr_reg(inlist_ptr),  brw_address(c->reg.inlist));
   brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));

   /* New clipped vertices are allocated from here onward. */
   brw_MOV(p, get_addr_reg(freelist_ptr), brw_address(c->reg.vertex[3]) );

   /* Set the initial vertex source mask: The first 6 planes are the bounds
    * of the view volume; the next 8 planes are the user clipping planes.
    */
   brw_MOV(p, c->reg.vertex_src_mask, brw_imm_ud(0x3fc0));

   /* Set the initial clipdistance offset to be 6 floats before
    * gl_ClipDistance[0].  We'll increment 6 times before we start hitting
    * actual user clipping.
    */
   brw_MOV(p, c->reg.clipdistance_offset,
           brw_imm_d(clipdist0_offset - 6*sizeof(float)));

   /* Outer loop: one iteration per clip plane. */
   brw_DO(p, BRW_EXECUTE_1);
   {
      /* if (planemask & 1) -- is this plane enabled? */
      brw_AND(p, vec1(brw_null_reg()), c->reg.planemask, brw_imm_ud(1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst,
                                 BRW_CONDITIONAL_NZ);
      brw_IF(p, BRW_EXECUTE_1);
      {
         /* vtxOut = freelist_ptr++ */
         brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(freelist_ptr) );
         brw_ADD(p, get_addr_reg(freelist_ptr), get_addr_reg(freelist_ptr),
                 brw_imm_uw(c->nr_regs * REG_SIZE));

         /* Plane equations are stored as floats when user clip planes are
          * present, as bytes otherwise.
          */
         if (c->key.nr_userclip)
            brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
         else
            brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));

         brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
         brw_MOV(p, c->reg.nr_verts, brw_imm_ud(0));

         /* Inner loop: walk the current polygon's vertex list. */
         brw_DO(p, BRW_EXECUTE_1);
         {
            /* vtx = *input_ptr;
             */
            brw_MOV(p, get_addr_reg(vtx), deref_1uw(inlist_ptr, 0));

            load_clip_distance(c, vtxPrev, c->reg.dpPrev, hpos_offset,
                               BRW_CONDITIONAL_L);
            /* (prev < 0.0f) */
            brw_IF(p, BRW_EXECUTE_1);
            {
               load_clip_distance(c, vtx, c->reg.dp, hpos_offset,
                                  BRW_CONDITIONAL_GE);
               /* IS_POSITIVE(next) */
               brw_IF(p, BRW_EXECUTE_1);
               {
                  /* Coming back in.  t = dpPrev / (dpPrev - dp). */
                  brw_ADD(p, c->reg.t, c->reg.dpPrev, negate(c->reg.dp));
                  brw_math_invert(p, c->reg.t, c->reg.t);
                  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dpPrev);

                  /* If (vtxOut == 0) vtxOut = vtxPrev -- reuse the previous
                   * vertex's storage in place.
                   */
                  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
                          get_addr_reg(vtxOut), brw_imm_uw(0) );
                  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtxPrev));
                  brw_inst_set_pred_control(p->devinfo, brw_last_inst,
                                            BRW_PREDICATE_NORMAL);

                  brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx,
                                         c->reg.t, false);

                  /* *outlist_ptr++ = vtxOut;
                   * nr_verts++;
                   * vtxOut = 0;
                   */
                  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
                  brw_ADD(p, get_addr_reg(outlist_ptr),
                          get_addr_reg(outlist_ptr),
                          brw_imm_uw(sizeof(short)));
                  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts,
                          brw_imm_ud(1));
                  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
               }
               brw_ENDIF(p);
            }
            brw_ELSE(p);
            {
               /* Previous vertex is inside: emit it as-is.
                *
                * *outlist_ptr++ = vtxPrev;
                * nr_verts++;
                */
               brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxPrev));
               brw_ADD(p, get_addr_reg(outlist_ptr),
                       get_addr_reg(outlist_ptr),
                       brw_imm_uw(sizeof(short)));
               brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));

               load_clip_distance(c, vtx, c->reg.dp, hpos_offset,
                                  BRW_CONDITIONAL_L);
               /* (next < 0.0f) */
               brw_IF(p, BRW_EXECUTE_1);
               {
                  /* Going out of bounds.  Avoid division by zero as we
                   * know dp != dpPrev from DIFFERENT_SIGNS, above.
                   *
                   * t = dp / (dp - dpPrev).
                   */
                  brw_ADD(p, c->reg.t, c->reg.dp, negate(c->reg.dpPrev));
                  brw_math_invert(p, c->reg.t, c->reg.t);
                  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp);

                  /* If (vtxOut == 0) vtxOut = vtx */
                  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
                          get_addr_reg(vtxOut), brw_imm_uw(0) );
                  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtx));
                  brw_inst_set_pred_control(p->devinfo, brw_last_inst,
                                            BRW_PREDICATE_NORMAL);

                  brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev,
                                         c->reg.t, true);

                  /* *outlist_ptr++ = vtxOut;
                   * nr_verts++;
                   * vtxOut = 0;
                   */
                  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
                  brw_ADD(p, get_addr_reg(outlist_ptr),
                          get_addr_reg(outlist_ptr),
                          brw_imm_uw(sizeof(short)));
                  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts,
                          brw_imm_ud(1));
                  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
               }
               brw_ENDIF(p);
            }
            brw_ENDIF(p);

            /* vtxPrev = vtx;
             * inlist_ptr++;
             */
            brw_MOV(p, get_addr_reg(vtxPrev), get_addr_reg(vtx));
            brw_ADD(p, get_addr_reg(inlist_ptr), get_addr_reg(inlist_ptr),
                    brw_imm_uw(sizeof(short)));

            /* while (--loopcount != 0) */
            brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
            brw_inst_set_cond_modifier(p->devinfo, brw_last_inst,
                                       BRW_CONDITIONAL_NZ);
         }
         brw_WHILE(p);
         brw_inst_set_pred_control(p->devinfo, brw_last_inst,
                                   BRW_PREDICATE_NORMAL);

         /* vtxPrev = *(outlist_ptr-1)  OR: outlist[nr_verts-1]
          * inlist = outlist
          * inlist_ptr = &inlist[0]
          * outlist_ptr = &outlist[0]
          */
         brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr),
                 brw_imm_w(-2));
         brw_MOV(p, get_addr_reg(vtxPrev), deref_1uw(outlist_ptr, 0));
         brw_MOV(p, brw_vec8_grf(c->reg.inlist.nr, 0),
                 brw_vec8_grf(c->reg.outlist.nr, 0));
         brw_MOV(p, get_addr_reg(inlist_ptr), brw_address(c->reg.inlist));
         brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
      }
      brw_ENDIF(p);

      /* plane_ptr++;
       */
      brw_ADD(p, get_addr_reg(plane_ptr), get_addr_reg(plane_ptr),
              brw_clip_plane_stride(c));

      /* nr_verts >= 3 -- loop continues only while a polygon remains. */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_GE,
              c->reg.nr_verts, brw_imm_ud(3));
      brw_set_default_predicate_control(p, BRW_PREDICATE_NORMAL);

      /* && (planemask>>=1) != 0 */
      brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst,
                                 BRW_CONDITIONAL_NZ);
      /* Advance the fixed-plane/user-plane selector and the clip-distance
       * offset in step with the plane mask.
       */
      brw_SHR(p, c->reg.vertex_src_mask, c->reg.vertex_src_mask,
              brw_imm_ud(1));
      brw_ADD(p, c->reg.clipdistance_offset, c->reg.clipdistance_offset,
              brw_imm_w(sizeof(float)));
   }
   brw_WHILE(p);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * Beware that dest_ptr can be equal to v0_ptr!
 *
 * All attribute interpolation uses the accumulator pattern
 * (MUL to null reg, then MAC): new = v0 + t*(v1 - v0).
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             bool force_edgeflag)
{
   struct brw_codegen *p = &c->func;
   struct brw_reg t_nopersp, v0_ndc_copy;
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After CLIP stage, only first 256 bits of the VUE are read
    * back on Ironlake, so needn't change it
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* First handle the 3D and NDC interpolation, in case we
    * need noperspective interpolation. Doing it early has no
    * performance impact in any case.
    */

   /* Take a copy of the v0 NDC coordinates, in case dest == v0. */
   if (c->has_noperspective_shading) {
      GLuint offset = brw_varying_to_offset(&c->vue_map,
                                            BRW_VARYING_SLOT_NDC);
      v0_ndc_copy = get_tmp(c);
      brw_MOV(p, v0_ndc_copy, deref_4f(v0_ptr, offset));
   }

   /* Compute the new 3D position
    *
    * dest_hpos = v0_hpos * (1 - t0) + v1_hpos * t0
    */
   {
      GLuint delta = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
      struct brw_reg tmp = get_tmp(c);
      /* acc = v1*t; tmp = -v0*t + acc = t*(v1 - v0); dest = v0 + tmp */
      brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
      brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
      brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      release_tmp(c, tmp);
   }

   /* Recreate the projected (NDC) coordinate in the new vertex header */
   brw_clip_project_vertex(c, dest_ptr);

   /* If we have noperspective attributes,
    * we need to compute the screen-space t
    */
   if (c->has_noperspective_shading) {
      GLuint delta = brw_varying_to_offset(&c->vue_map,
                                           BRW_VARYING_SLOT_NDC);
      struct brw_reg tmp = get_tmp(c);
      t_nopersp = get_tmp(c);

      /* t_nopersp = vec4(v1.xy, dest.xy) */
      brw_MOV(p, t_nopersp, deref_4f(v1_ptr, delta));
      brw_MOV(p, tmp, deref_4f(dest_ptr, delta));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p,
              brw_writemask(t_nopersp, WRITEMASK_ZW),
              brw_swizzle(tmp, 0, 1, 0, 1));

      /* t_nopersp = vec4(v1.xy, dest.xy) - v0.xyxy */
      brw_ADD(p, t_nopersp, t_nopersp,
              negate(brw_swizzle(v0_ndc_copy, 0, 1, 0, 1)));

      /* Add the absolute values of the X and Y deltas so that if
       * the points aren't in the same place on the screen we get
       * nonzero values to divide.
       *
       * After that, we have vert1 - vert0 in t_nopersp.x and
       * vertnew - vert0 in t_nopersp.y
       *
       * t_nopersp = vec2(|v1.x  -v0.x| + |v1.y  -v0.y|,
       *                  |dest.x-v0.x| + |dest.y-v0.y|)
       */
      brw_ADD(p,
              brw_writemask(t_nopersp, WRITEMASK_XY),
              brw_abs(brw_swizzle(t_nopersp, 0, 2, 0, 0)),
              brw_abs(brw_swizzle(t_nopersp, 1, 3, 0, 0)));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* If the points are in the same place, just substitute a
       * value to avoid divide-by-zero
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
              vec1(t_nopersp),
              brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      brw_MOV(p, t_nopersp, brw_imm_vf4(brw_float_to_vf(1.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0)));
      brw_ENDIF(p);

      /* Now compute t_nopersp = t_nopersp.y/t_nopersp.x and broadcast it. */
      brw_math_invert(p, get_element(t_nopersp, 0),
                      get_element(t_nopersp, 0));
      brw_MUL(p, vec1(t_nopersp), vec1(t_nopersp),
              vec1(suboffset(t_nopersp, 1)));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p, t_nopersp, brw_swizzle(t_nopersp, 0, 0, 0, 0));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      release_tmp(c, tmp);
      release_tmp(c, v0_ndc_copy);
   }

   /* Now we can iterate over each attribute
    * (could be done in pairs?)
    */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      /* HPOS, NDC already handled above */
      if (varying == VARYING_SLOT_POS || varying == BRW_VARYING_SLOT_NDC)
         continue;

      if (varying == VARYING_SLOT_EDGE) {
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ) {
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.
          */
      } else if (varying < VARYING_SLOT_MAX) {
         /* This is a true vertex result (and not a special value for the VUE
          * header), so interpolate:
          *
          *        New = attr0 + t*attr1 - t*attr0
          *
          * Unless the attribute is flat shaded -- in which case just copy
          * from one of the sources (doesn't matter which; already copied
          * from pv)
          */
         GLuint interp = c->key.interpolation_mode.mode[slot];

         if (interp != INTERP_QUALIFIER_FLAT) {
            struct brw_reg tmp = get_tmp(c);
            /* Noperspective attributes interpolate with the screen-space t
             * computed above; everything else uses the clip-space t0.
             */
            struct brw_reg t =
               interp == INTERP_QUALIFIER_NOPERSPECTIVE ? t_nopersp : t0;

            brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t);
            brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t);
            brw_ADD(p, deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta), tmp);

            release_tmp(c, tmp);
         } else {
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
         }
      }
   }

   /* Pad an odd slot count with zeros to keep the VUE size even. */
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   if (c->has_noperspective_shading)
      release_tmp(c, t_nopersp);
}
/**
 * Generate assembly for a Vec4 IR instruction.
 *
 * \param instruction The Vec4 IR instruction to generate code for.
 * \param dst The destination register.
 * \param src An array of up to three source registers.
 */
void
vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
                                          struct brw_reg dst,
                                          struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *) instruction;

   if (dst.width == BRW_WIDTH_4) {
      /* This happens in attribute fixups for "dual instanced" geometry
       * shaders, since they use attributes that are vec4's.  Since the exec
       * width is only 4, it's essential that the caller set
       * force_writemask_all in order to make sure the instruction is executed
       * regardless of which channels are enabled.
       */
      assert(inst->force_writemask_all);

      /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
       * the following register region restrictions (from Graphics BSpec:
       * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
       * > Register Region Restrictions)
       *
       *     1. ExecSize must be greater than or equal to Width.
       *
       *     2. If ExecSize = Width and HorzStride != 0, VertStride must be
       *        set to Width * HorzStride."
       */
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BRW_GENERAL_REGISTER_FILE)
            src[i] = stride(src[i], 4, 4, 1);
      }
   }

   switch (inst->opcode) {
   /* Basic ALU operations map 1:1 to hardware instructions. */
   case BRW_OPCODE_MOV:
      brw_MOV(p, dst, src[0]);
      break;
   case BRW_OPCODE_ADD:
      brw_ADD(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MUL:
      brw_MUL(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_MACH:
      /* MACH needs AccWrEn so the accumulator's high result is committed. */
      brw_set_acc_write_control(p, 1);
      brw_MACH(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;

   case BRW_OPCODE_MAD:
      assert(brw->gen >= 6);
      brw_MAD(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_FRC:
      brw_FRC(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDD:
      brw_RNDD(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDE:
      brw_RNDE(p, dst, src[0]);
      break;
   case BRW_OPCODE_RNDZ:
      brw_RNDZ(p, dst, src[0]);
      break;

   case BRW_OPCODE_AND:
      brw_AND(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_OR:
      brw_OR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_XOR:
      brw_XOR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_NOT:
      brw_NOT(p, dst, src[0]);
      break;
   case BRW_OPCODE_ASR:
      brw_ASR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHR:
      brw_SHR(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_SHL:
      brw_SHL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_CMP:
      brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
      break;
   case BRW_OPCODE_SEL:
      brw_SEL(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DPH:
      brw_DPH(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP4:
      brw_DP4(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP3:
      brw_DP3(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_DP2:
      brw_DP2(p, dst, src[0], src[1]);
      break;

   case BRW_OPCODE_F32TO16:
      assert(brw->gen >= 7);
      brw_F32TO16(p, dst, src[0]);
      break;

   case BRW_OPCODE_F16TO32:
      assert(brw->gen >= 7);
      brw_F16TO32(p, dst, src[0]);
      break;

   case BRW_OPCODE_LRP:
      assert(brw->gen >= 6);
      brw_LRP(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFREV:
      assert(brw->gen >= 7);
      /* BFREV only supports UD type for src and dst. */
      brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                retype(src[0], BRW_REGISTER_TYPE_UD));
      break;
   case BRW_OPCODE_FBH:
      assert(brw->gen >= 7);
      /* FBH only supports UD type for dst. */
      brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_FBL:
      assert(brw->gen >= 7);
      /* FBL only supports UD type for dst. */
      brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_CBIT:
      assert(brw->gen >= 7);
      /* CBIT only supports UD type for dst. */
      brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
      break;
   case BRW_OPCODE_ADDC:
      assert(brw->gen >= 7);
      /* ADDC writes the carry bits to the accumulator; needs AccWrEn. */
      brw_set_acc_write_control(p, 1);
      brw_ADDC(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;
   case BRW_OPCODE_SUBB:
      assert(brw->gen >= 7);
      /* SUBB likewise writes borrow bits to the accumulator. */
      brw_set_acc_write_control(p, 1);
      brw_SUBB(p, dst, src[0], src[1]);
      brw_set_acc_write_control(p, 0);
      break;

   case BRW_OPCODE_BFE:
      assert(brw->gen >= 7);
      brw_BFE(p, dst, src[0], src[1], src[2]);
      break;

   case BRW_OPCODE_BFI1:
      assert(brw->gen >= 7);
      brw_BFI1(p, dst, src[0], src[1]);
      break;
   case BRW_OPCODE_BFI2:
      assert(brw->gen >= 7);
      brw_BFI2(p, dst, src[0], src[1], src[2]);
      break;

   /* Control flow. */
   case BRW_OPCODE_IF:
      if (inst->src[0].file != BAD_FILE) {
         /* The instruction has an embedded compare (only allowed on gen6) */
         assert(brw->gen == 6);
         gen6_IF(p, inst->conditional_mod, src[0], src[1]);
      } else {
         struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
         brw_inst->header.predicate_control = inst->predicate;
      }
      break;

   case BRW_OPCODE_ELSE:
      brw_ELSE(p);
      break;
   case BRW_OPCODE_ENDIF:
      brw_ENDIF(p);
      break;

   case BRW_OPCODE_DO:
      brw_DO(p, BRW_EXECUTE_8);
      break;

   case BRW_OPCODE_BREAK:
      brw_BREAK(p);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      break;
   case BRW_OPCODE_CONTINUE:
      /* FINISHME: We need to write the loop instruction support still. */
      if (brw->gen >= 6)
         gen6_CONT(p);
      else
         brw_CONT(p);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      break;

   case BRW_OPCODE_WHILE:
      brw_WHILE(p);
      break;

   /* Math ops go through the shared math unit; the setup differs by gen. */
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (brw->gen == 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         /* Also works for Gen7. */
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (brw->gen >= 7) {
         generate_math2_gen7(inst, dst, src[0], src[1]);
      } else if (brw->gen == 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   /* Texturing and surface access. */
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
      generate_tex(inst, dst, src[0]);
      break;

   case VS_OPCODE_URB_WRITE:
      generate_vs_urb_write(inst);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      generate_pull_constant_load_gen7(inst, dst, src[0], src[1]);
      break;

   /* Geometry-shader specific opcodes. */
   case GS_OPCODE_URB_WRITE:
      generate_gs_urb_write(inst);
      break;

   case GS_OPCODE_THREAD_END:
      generate_gs_thread_end(inst);
      break;

   case GS_OPCODE_SET_WRITE_OFFSET:
      generate_gs_set_write_offset(dst, src[0], src[1]);
      break;

   case GS_OPCODE_SET_VERTEX_COUNT:
      generate_gs_set_vertex_count(dst, src[0]);
      break;

   case GS_OPCODE_SET_DWORD_2_IMMED:
      generate_gs_set_dword_2_immed(dst, src[0]);
      break;

   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      generate_gs_prepare_channel_masks(dst);
      break;

   case GS_OPCODE_SET_CHANNEL_MASKS:
      generate_gs_set_channel_masks(dst, src[0]);
      break;

   case GS_OPCODE_GET_INSTANCE_ID:
      generate_gs_get_instance_id(dst);
      break;

   case SHADER_OPCODE_SHADER_TIME_ADD:
      brw_shader_time_add(p, src[0],
                          prog_data->base.binding_table.shader_time_start);
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.shader_time_start);
      break;

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      generate_untyped_atomic(inst, dst, src[0], src[1]);
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      generate_untyped_surface_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      generate_unpack_flags(inst, dst);
      break;

   default:
      /* Unknown opcode: report (by name when the table covers it) and die. */
      if (inst->opcode < (int) ARRAY_SIZE(opcode_descs)) {
         _mesa_problem(&brw->ctx, "Unsupported opcode in `%s' in vec4\n",
                       opcode_descs[inst->opcode].name);
      } else {
         _mesa_problem(&brw->ctx, "Unsupported opcode %d in vec4",
                       inst->opcode);
      }
      abort();
   }
}
/* Emit a SIMD4x2 sampler message for a vec4 texture instruction.
 *
 * Selects the hardware message type from the IR opcode (encoding differs
 * before/after Gen5), optionally builds an explicit message header, and
 * issues the SEND via brw_SAMPLE.
 */
void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   int msg_type = -1;

   if (brw->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(brw->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (brw->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(brw->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   } else {
      /* Pre-Gen5 message encoding; note the mlen asserts documenting the
       * expected payload sizes.
       */
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         assert(!"should not get here: invalid vec4 texture opcode");
         break;
      }
   }
   assert(msg_type != -1);

   /* Load the message header if present.  If there's a texture offset, we
    * need to set it up explicitly and load the offset bitfield.  Otherwise,
    * we can use an implied move from g0 to the first message register.
    */
   if (inst->header_present) {
      if (brw->gen < 6 && !inst->texture_offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header = retype(brw_message_reg(inst->base_mrf),
                                        BRW_REGISTER_TYPE_UD);

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0),
                                   BRW_REGISTER_TYPE_UD));

         brw_set_access_mode(p, BRW_ALIGN_1);

         if (inst->texture_offset) {
            /* Set the texel offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header, 2),
                    brw_imm_ud(inst->texture_offset));
         }

         if (inst->sampler >= 16) {
            /* The "Sampler Index" field can only store values between 0 and
             * 15.  However, we can add an offset to the "Sampler State
             * Pointer" field, effectively selecting a different set of 16
             * samplers.
             *
             * The "Sampler State Pointer" needs to be aligned to a 32-byte
             * offset, and each sampler state is only 16-bytes, so we can't
             * exclusively use the offset - we have to use both.
             */
            assert(brw->is_haswell); /* field only exists on Haswell */
            brw_ADD(p,
                    get_element_ud(header, 3),
                    get_element_ud(brw_vec8_grf(0, 0), 3),
                    brw_imm_ud(16 * (inst->sampler / 16) *
                               sizeof(gen7_sampler_state)));
         }
         brw_pop_insn_state(p);
      }
   }

   /* The sampler's return format follows the destination register type. */
   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* gather4 (TG4) uses its own section of the binding table. */
   uint32_t surface_index = ((inst->opcode == SHADER_OPCODE_TG4 ||
                              inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                             ? prog_data->base.binding_table.gather_texture_start
                             : prog_data->base.binding_table.texture_start) +
                            inst->sampler;

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surface_index,
              inst->sampler % 16,
              msg_type,
              1, /* response length */
              inst->mlen,
              inst->header_present,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              return_format);

   brw_mark_surface_used(&prog_data->base, surface_index);
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * NOTE(review): this whole body is compiled out via #if 0 / #warning --
 * it is a dead placeholder kept for reference and does nothing at runtime.
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             boolean force_edgeflag)
{
#if 0
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   unsigned i;

   /* Just copy the vertex header:
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* Iterate over each attribute (could be done in pairs?)
    */
   for (i = 0; i < c->nr_attrs; i++) {
      unsigned delta = i*16 + 32;

      if (delta == c->offset[VERT_RESULT_EDGE]) {
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else {
         /* Interpolate:
          *
          *      New = attr0 + t*attr1 - t*attr0
          */
         brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
         brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
         brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      }
   }

   /* Pad an odd attribute count with zeros. */
   if (i & 1) {
      unsigned delta = i*16 + 32;

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   release_tmp(c, tmp);

   /* Recreate the projected (NDC) coordinate in the new vertex
    * header:
    */
   brw_clip_project_vertex(c, dest_ptr );
#else
#warning "disabled"
#endif
}
/** * Apply an additive offset to DWORD 2 of c->reg.header. * * This is used to set/unset the "PrimStart" and "PrimEnd" flags appropriately * for each vertex. */ static void brw_gs_offset_header_dw2(struct brw_gs_compile *c, int offset) { struct brw_compile *p = &c->func; brw_ADD(p, get_element_d(c->reg.header, 2), get_element_d(c->reg.header, 2), brw_imm_d(offset)); }
/* Emit the SF unit's triangle setup program: per attribute, compute the
 * plane coefficients (dA/dx, dA/dy, C0) used by the windower for
 * interpolation, and write them to the URB.
 *
 * `allocate` controls whether registers are allocated here or were set up
 * by the caller.  Predication (via set_predicate_control_flag_value) gates
 * which channels participate in the perspective and linear paths.
 */
void brw_emit_tri_setup(struct brw_sf_compile *c, bool allocate)
{
   struct brw_compile *p = &c->func;
   GLuint i;

   c->flag_value = 0xff;
   c->nr_verts = 3;

   if (allocate)
      alloc_regs(c);

   invert_det(c);
   copy_z_inv_w(c);

   if (c->key.do_twoside_color)
      do_twoside_color(c);

   if (c->has_flat_shading)
      do_flatshade_triangle(c);

   for (i = 0; i < c->nr_setup_regs; i++) {
      /* Pair of incoming attributes:
       */
      struct brw_reg a0 = offset(c->vert[0], i);
      struct brw_reg a1 = offset(c->vert[1], i);
      struct brw_reg a2 = offset(c->vert[2], i);
      GLushort pc, pc_persp, pc_linear;
      bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);

      /* Perspective-correct attributes: pre-multiply by 1/w of each vertex. */
      if (pc_persp) {
	 set_predicate_control_flag_value(p, c, pc_persp);
	 brw_MUL(p, a0, a0, c->inv_w[0]);
	 brw_MUL(p, a1, a1, c->inv_w[1]);
	 brw_MUL(p, a2, a2, c->inv_w[2]);
      }

      /* Calculate coefficients for interpolated values:
       */
      if (pc_linear) {
	 set_predicate_control_flag_value(p, c, pc_linear);

	 brw_ADD(p, c->a1_sub_a0, a1, negate(a0));
	 brw_ADD(p, c->a2_sub_a0, a2, negate(a0));

	 /* calculate dA/dx
	  * (MUL writes the accumulator; MAC then accumulates into tmp.)
	  */
	 brw_MUL(p, brw_null_reg(), c->a1_sub_a0, c->dy2);
	 brw_MAC(p, c->tmp, c->a2_sub_a0, negate(c->dy0));
	 brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);

	 /* calculate dA/dy
	  */
	 brw_MUL(p, brw_null_reg(), c->a2_sub_a0, c->dx0);
	 brw_MAC(p, c->tmp, c->a1_sub_a0, negate(c->dx2));
	 brw_MUL(p, c->m2Cy, c->tmp, c->inv_det);
      }

      {
	 set_predicate_control_flag_value(p, c, pc);

	 /* start point for interpolation
	  */
	 brw_MOV(p, c->m3C0, a0);

	 /* Copy m0..m3 to URB.  m0 is implicitly copied from r0 in
	  * the send instruction:
	  */
	 brw_urb_WRITE(p,
		       brw_null_reg(),
		       0,
		       brw_vec8_grf(0, 0), /* r0, will be copied to m0 */
		       last ? BRW_URB_WRITE_EOT_COMPLETE
		            : BRW_URB_WRITE_NO_FLAGS,
		       4, /* msg len */
		       0, /* response len */
		       i*4, /* offset */
		       BRW_URB_SWIZZLE_TRANSPOSE); /* XXX: Swizzle control "SF to windower" */
      }
   }

   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
}
/***********************************************************************
 * Output clipped polygon as an unfilled primitive:
 *
 * Walks the clipped vertex list (c->reg.inlist) and emits each polygon
 * edge whose edge flag is non-zero as a two-vertex line strip.  When
 * `do_offset` is set, a first pass applies polygon offset to every
 * vertex before emission.
 */
static void emit_lines(struct brw_clip_compile *c,
		       bool do_offset)
{
   struct brw_compile *p = &c->func;
   const struct brw_context *brw = p->brw;
   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v1 = brw_indirect(1, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);
   struct brw_indirect v1ptr = brw_indirect(3, 0);

   /* Need a separate loop for offset:
    */
   if (do_offset) {
      brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
      brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

      brw_DO(p, BRW_EXECUTE_1);
      {
	 brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
	 brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

	 apply_one_offset(c, v0);

	 /* Decrement with conditional modifier so the WHILE below can
	  * predicate on loopcount > 0.
	  */
	 brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
	 brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_G);
      }
      brw_WHILE(p);
      brw_inst_set_pred_control(brw, brw_last_inst, BRW_PREDICATE_NORMAL);
   }

   /* v1ptr = &inlist[nr_verts]
    * *v1ptr = v0
    *
    * (Duplicate the first vertex index after the list so the last edge
    * wraps around; entries are 2-byte words, hence the two UW adds.)
    */
   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v0ptr),
	   retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v1ptr),
	   retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_MOV(p, deref_1uw(v1ptr, 0), deref_1uw(v0ptr, 0));

   brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_MOV(p, get_addr_reg(v1), deref_1uw(v0ptr, 2));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw edge if edgeflag != 0 */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
	      deref_1f(v0, brw_varying_to_offset(&c->vue_map, VARYING_SLOT_EDGE)),
	      brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      {
	 brw_clip_emit_vue(c, v0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
			   (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
			   | URB_WRITE_PRIM_START);
	 brw_clip_emit_vue(c, v1, BRW_URB_WRITE_ALLOCATE_COMPLETE,
			   (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
			   | URB_WRITE_PRIM_END);
      }
      brw_ENDIF(p);

      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
      brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_NZ);
   }
   brw_WHILE(p);
   brw_inst_set_pred_control(brw, brw_last_inst, BRW_PREDICATE_NORMAL);
}
/**
 * Read float[4] constant(s) from VS constant buffer.
 * For relative addressing, two float[4] constants will be read into 'dest'.
 * Otherwise, one float[4] constant will be read into the lower half of 'dest'.
 *
 * Implementation: loads MRF[1] with the constant-buffer offset (adding
 * addrReg when relAddr), then hand-builds a SEND with an OWORD block read
 * data-port message against `bind_table_index`.
 */
void brw_dp_READ_4_vs(struct brw_compile *p,
                      struct brw_reg dest,
                      GLuint oword,
                      GLboolean relAddr,
                      struct brw_reg addrReg,
                      GLuint location,
                      GLuint bind_table_index)
{
   GLuint msg_reg_nr = 1;

   assert(oword < 2);
   /*
   printf("vs const read msg, location %u, msg_reg_nr %d\n",
          location, msg_reg_nr);
   */

   /* Setup MRF[1] with location/offset into const buffer */
   {
      struct brw_reg b;

      /* Push/pop so the compression/mask/predication overrides below do not
       * leak into subsequently emitted instructions.
       */
      brw_push_insn_state(p);
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      /*brw_set_access_mode(p, BRW_ALIGN_16);*/

      /* XXX I think we're setting all the dwords of MRF[1] to 'location'.
       * when the docs say only dword[2] should be set.   Hmmm.  But it works.
       */
      b = brw_message_reg(msg_reg_nr);
      b = retype(b, BRW_REGISTER_TYPE_UD);
      /*b = get_element_ud(b, 2);*/
      if (relAddr) {
         brw_ADD(p, b, addrReg, brw_imm_ud(location));
      }
      else {
         brw_MOV(p, b, brw_imm_ud(location));
      }
      brw_pop_insn_state(p);
   }

   {
      /* Hand-build the SEND rather than using a helper; header fields are
       * set explicitly on the raw instruction.
       */
      struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);

      insn->header.predicate_control = BRW_PREDICATE_NONE;
      insn->header.compression_control = BRW_COMPRESSION_NONE;
      insn->header.destreg__conditionalmod = msg_reg_nr;
      insn->header.mask_control = BRW_MASK_DISABLE;
      /*insn->header.access_mode = BRW_ALIGN_16;*/

      brw_set_dest(insn, dest);
      brw_set_src0(insn, brw_null_reg());

      brw_set_dp_read_message(p->brw,
			      insn,
			      bind_table_index,
			      oword,  /* 0 = lower Oword, 1 = upper Oword */
			      BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
			      0, /* source cache = data cache */
			      1, /* msg_length */
			      1, /* response_length (1 Oword) */
			      0); /* eot */
   }
}
/* Main codegen loop for the vec4 (vertex shader) backend: walks the IR
 * instruction list, sets per-instruction execution state (conditional
 * modifier, predication, saturate), and emits the corresponding native
 * GEN instruction.  Opcodes with no direct 1:1 mapping fall through to
 * generate_vs_instruction().  With INTEL_DEBUG=vs, annotates and
 * disassembles each instruction as it is emitted.
 */
void vec4_generator::generate_code(exec_list *instructions)
{
   int last_native_insn_offset = 0;
   const char *last_annotation_string = NULL;
   const void *last_annotation_ir = NULL;

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      if (shader) {
         printf("Native code for vertex shader %d:\n", prog->Name);
      } else {
         printf("Native code for vertex program %d:\n", c->vp->program.Base.Id);
      }
   }

   foreach_list(node, instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      /* Debug annotation: print the source IR / Mesa IR line once per
       * group of native instructions it generated.
       */
      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf(" ");
               if (shader) {
                  ((ir_instruction *) last_annotation_ir)->print();
               } else {
                  const prog_instruction *vpi;
                  vpi = (const prog_instruction *) inst->ir;
                  printf("%d: ", (int)(vpi - vp->Base.Instructions));
                  _mesa_fprint_instruction_opt(stdout, vpi, 0,
                                               PROG_PRINT_DEBUG, NULL);
               }
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf(" %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(i);
      }
      dst = inst->get_dst();

      /* Per-instruction default state consumed by the brw_* emitters. */
      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         /* MACH needs AccWrEn so the accumulator high result is written. */
         brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
         brw_set_acc_write_control(p, 0);
         break;
      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;
      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(intel->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
         }
         break;
      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;
      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;
      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            gen6_CONT(p);
         else
            brw_CONT(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         break;
      default:
         generate_vs_instruction(inst, dst, src);
         break;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         brw_dump_compile(p, stdout, last_native_insn_offset, p->next_insn_offset);
      }

      last_native_insn_offset = p->next_insn_offset;
   }
   /* NOTE(review): the function's closing brace is not visible in this
    * chunk of the file — verify it is present in the full source.
    */
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * VUE-map-aware variant: walks the slot map, leaving PSIZ/CLIP_DIST*
 * untouched, copying or forcing the edge flag, and linearly interpolating
 * every real varying by the parameter t0.
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
			     struct brw_indirect dest_ptr,
			     struct brw_indirect v0_ptr, /* from */
			     struct brw_indirect v1_ptr, /* to */
			     struct brw_reg t0,
			     bool force_edgeflag)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After CLIP stage, only first 256 bits of the VUE are read
    * back on Ironlake, so needn't change it
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* Iterate over each attribute (could be done in pairs?)
    */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      if (varying == VARYING_SLOT_EDGE) {
	 if (force_edgeflag)
	    brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
	 else
	    brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ ||
                 varying == VARYING_SLOT_CLIP_DIST0 ||
                 varying == VARYING_SLOT_CLIP_DIST1) {
	 /* PSIZ doesn't need interpolation because it isn't used by the
	  * fragment shader.  CLIP_DIST0 and CLIP_DIST1 don't need
	  * interpolation because on pre-GEN6, these are just placeholder VUE
	  * slots that don't perform any action.
	  */
      } else if (varying < VARYING_SLOT_MAX) {
	 /* This is a true vertex result (and not a special value for the VUE
	  * header), so interpolate:
	  *
	  *        New = attr0 + t*attr1 - t*attr0
	  *
	  * (MUL primes the accumulator, MAC subtracts t*attr0 into tmp.)
	  */
	 brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
	 brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
	 brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      }
   }

   /* Zero the trailing pad slot when the slot count is odd. */
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   release_tmp(c, tmp);

   /* Recreate the projected (NDC) coordinate in the new vertex header:
    */
   brw_clip_project_vertex(c, dest_ptr );
}
/***********************************************************************
 * Output clipped polygon as an unfilled primitive:
 *
 * Older variant using explicit brw_instruction pointers for flow control
 * (brw_DO/brw_WHILE/brw_IF take and return raw instruction pointers).
 * Walks the clipped vertex list and emits each edge whose edge flag is
 * non-zero as a two-vertex line strip; when `do_offset` is set, a first
 * pass applies polygon offset to every vertex.
 */
static void emit_lines(struct brw_clip_compile *c,
		       GLboolean do_offset)
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *loop;
   struct brw_instruction *draw_edge;
   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v1 = brw_indirect(1, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);
   struct brw_indirect v1ptr = brw_indirect(3, 0);

   /* Need a separate loop for offset:
    */
   if (do_offset) {
      brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
      brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

      loop = brw_DO(p, BRW_EXECUTE_1);
      {
	 brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
	 brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

	 apply_one_offset(c, v0);

	 /* Conditional modifier on the decrement feeds the WHILE. */
	 brw_set_conditionalmod(p, BRW_CONDITIONAL_G);
	 brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
      }
      brw_WHILE(p, loop);
   }

   /* v1ptr = &inlist[nr_verts]
    * *v1ptr = v0
    *
    * (Append the first vertex index after the list so the last edge wraps;
    * entries are 2-byte words, hence two UW-typed adds of nr_verts.)
    */
   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v0ptr),
	   retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v1ptr),
	   retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_MOV(p, deref_1uw(v1ptr, 0), deref_1uw(v0ptr, 0));

   loop = brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_MOV(p, get_addr_reg(v1), deref_1uw(v0ptr, 2));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw edge if edgeflag != 0 */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
	      deref_1f(v0, c->offset[VERT_RESULT_EDGE]),
	      brw_imm_f(0));
      draw_edge = brw_IF(p, BRW_EXECUTE_1);
      {
	 brw_clip_emit_vue(c, v0, 1, 0,
			   (_3DPRIM_LINESTRIP << 2) | R02_PRIM_START);
	 brw_clip_emit_vue(c, v1, 1, 0,
			   (_3DPRIM_LINESTRIP << 2) | R02_PRIM_END);
      }
      brw_ENDIF(p, draw_edge);

      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
   }
   brw_WHILE(p, loop);
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * Pre-VUE-map variant: attributes are addressed by fixed stride
 * (16 bytes each, 32-byte header), with a larger header offset on IGDNG
 * (Ironlake).  The edge flag is copied or forced; everything else is
 * linearly interpolated by t0.
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
			     struct brw_indirect dest_ptr,
			     struct brw_indirect v0_ptr, /* from */
			     struct brw_indirect v1_ptr, /* to */
			     struct brw_reg t0,
			     GLboolean force_edgeflag)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   GLuint i;

   /* Just copy the vertex header:
    */
   /*
    * After CLIP stage, only first 256 bits of the VUE are read
    * back on IGDNG, so needn't change it
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* Iterate over each attribute (could be done in pairs?)
    */
   for (i = 0; i < c->nr_attrs; i++) {
      GLuint delta = i*16 + 32;

      /* IGDNG has a 3x larger vertex header before the attributes. */
      if (BRW_IS_IGDNG(p->brw))
	 delta = i * 16 + 32 * 3;

      if (delta == c->offset[VERT_RESULT_EDGE]) {
	 if (force_edgeflag)
	    brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
	 else
	    brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      }
      else {
	 /* Interpolate:
	  *
	  *        New = attr0 + t*attr1 - t*attr0
	  */
	 brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
	 brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
	 brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      }
   }

   /* Zero the pad slot when the attribute count is odd. */
   if (i & 1) {
      GLuint delta = i*16 + 32;

      if (BRW_IS_IGDNG(p->brw))
	 delta = i * 16 + 32 * 3;

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   release_tmp(c, tmp);

   /* Recreate the projected (NDC) coordinate in the new vertex header:
    */
   brw_clip_project_vertex(c, dest_ptr );
}
/* Use mesa's clipping algorithms, translated to GEN4 assembly.
 *
 * Sutherland–Hodgman style: for each active plane in c->reg.planemask,
 * walk the current vertex list (inlist), classify each vertex against the
 * plane with a DP4, emit crossing vertices interpolated at parameter t
 * into freshly allocated slots (freelist), and build outlist.  The lists
 * are then swapped and the loop continues while nr_verts >= 3 and planes
 * remain.  All loop/branch structure is emitted as EU flow-control
 * instructions (DO/WHILE/IF/ELSE with explicit instruction pointers).
 */
void brw_clip_tri( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_indirect vtx = brw_indirect(0, 0);
   struct brw_indirect vtxPrev = brw_indirect(1, 0);
   struct brw_indirect vtxOut = brw_indirect(2, 0);
   struct brw_indirect plane_ptr = brw_indirect(3, 0);
   struct brw_indirect inlist_ptr = brw_indirect(4, 0);
   struct brw_indirect outlist_ptr = brw_indirect(5, 0);
   struct brw_indirect freelist_ptr = brw_indirect(6, 0);
   struct brw_instruction *plane_loop;
   struct brw_instruction *plane_active;
   struct brw_instruction *vertex_loop;
   struct brw_instruction *next_test;
   struct brw_instruction *prev_test;

   brw_MOV(p, get_addr_reg(vtxPrev),     brw_address(c->reg.vertex[2]) );
   brw_MOV(p, get_addr_reg(plane_ptr),   brw_clip_plane0_address(c));
   brw_MOV(p, get_addr_reg(inlist_ptr),  brw_address(c->reg.inlist));
   brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
   brw_MOV(p, get_addr_reg(freelist_ptr), brw_address(c->reg.vertex[3]) );

   plane_loop = brw_DO(p, BRW_EXECUTE_1);
   {
      /* if (planemask & 1)
       */
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_AND(p, vec1(brw_null_reg()), c->reg.planemask, brw_imm_ud(1));

      plane_active = brw_IF(p, BRW_EXECUTE_1);
      {
	 /* vtxOut = freelist_ptr++
	  */
	 brw_MOV(p, get_addr_reg(vtxOut),       get_addr_reg(freelist_ptr) );
	 brw_ADD(p, get_addr_reg(freelist_ptr),
		 get_addr_reg(freelist_ptr), brw_imm_uw(c->nr_regs * REG_SIZE));

	 /* User clip planes are float[4]; fixed planes are packed bytes. */
	 if (c->key.nr_userclip)
	    brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
	 else
	    brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));

	 brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
	 brw_MOV(p, c->reg.nr_verts, brw_imm_ud(0));

	 vertex_loop = brw_DO(p, BRW_EXECUTE_1);
	 {
	    /* vtx = *input_ptr;
	     */
	    brw_MOV(p, get_addr_reg(vtx), deref_1uw(inlist_ptr, 0));

	    /* IS_NEGATIVE(prev) */
	    brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
	    brw_DP4(p, vec4(c->reg.dpPrev),
		    deref_4f(vtxPrev, c->offset_hpos), c->reg.plane_equation);
	    prev_test = brw_IF(p, BRW_EXECUTE_1);
	    {
	       /* IS_POSITIVE(next)
		*/
	       brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
	       brw_DP4(p, vec4(c->reg.dp),
		       deref_4f(vtx, c->offset_hpos), c->reg.plane_equation);
	       next_test = brw_IF(p, BRW_EXECUTE_1);
	       {
		  /* Coming back in.
		   * t = dpPrev / (dpPrev - dp)
		   */
		  brw_ADD(p, c->reg.t, c->reg.dpPrev, negate(c->reg.dp));
		  brw_math_invert(p, c->reg.t, c->reg.t);
		  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dpPrev);

		  /* If (vtxOut == 0) vtxOut = vtxPrev
		   * (predicated MOV; predication is cleared right after)
		   */
		  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
			  get_addr_reg(vtxOut), brw_imm_uw(0) );
		  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtxPrev) );
		  brw_set_predicate_control(p, BRW_PREDICATE_NONE);

		  brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx, c->reg.t, GL_FALSE);

		  /* *outlist_ptr++ = vtxOut;
		   * nr_verts++;
		   * vtxOut = 0;
		   */
		  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
		  brw_ADD(p, get_addr_reg(outlist_ptr),
			  get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
		  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
		  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
	       }
	       brw_ENDIF(p, next_test);
	    }
	    prev_test = brw_ELSE(p, prev_test);
	    {
	       /* *outlist_ptr++ = vtxPrev;
		* nr_verts++;
		*/
	       brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxPrev));
	       brw_ADD(p, get_addr_reg(outlist_ptr),
		       get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
	       brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));

	       /* IS_NEGATIVE(next)
		*/
	       brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
	       brw_DP4(p, vec4(c->reg.dp),
		       deref_4f(vtx, c->offset_hpos), c->reg.plane_equation);
	       next_test = brw_IF(p, BRW_EXECUTE_1);
	       {
		  /* Going out of bounds.  Avoid division by zero as we
		   * know dp != dpPrev from DIFFERENT_SIGNS, above.
		   * t = dp / (dp - dpPrev)
		   */
		  brw_ADD(p, c->reg.t, c->reg.dp, negate(c->reg.dpPrev));
		  brw_math_invert(p, c->reg.t, c->reg.t);
		  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp);

		  /* If (vtxOut == 0) vtxOut = vtx
		   */
		  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
			  get_addr_reg(vtxOut), brw_imm_uw(0) );
		  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtx) );
		  brw_set_predicate_control(p, BRW_PREDICATE_NONE);

		  brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev, c->reg.t, GL_TRUE);

		  /* *outlist_ptr++ = vtxOut;
		   * nr_verts++;
		   * vtxOut = 0;
		   */
		  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
		  brw_ADD(p, get_addr_reg(outlist_ptr),
			  get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
		  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
		  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
	       }
	       brw_ENDIF(p, next_test);
	    }
	    brw_ENDIF(p, prev_test);

	    /* vtxPrev = vtx;
	     * inlist_ptr++;
	     */
	    brw_MOV(p, get_addr_reg(vtxPrev), get_addr_reg(vtx));
	    brw_ADD(p, get_addr_reg(inlist_ptr),
		    get_addr_reg(inlist_ptr), brw_imm_uw(sizeof(short)));

	    /* while (--loopcount != 0)
	     */
	    brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
	    brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
	 }
	 brw_WHILE(p, vertex_loop);

	 /* vtxPrev = *(outlist_ptr-1)  OR:   outlist[nr_verts-1]
	  * inlist = outlist
	  * inlist_ptr = &inlist[0]
	  * outlist_ptr = &outlist[0]
	  */
	 brw_ADD(p, get_addr_reg(outlist_ptr),
		 get_addr_reg(outlist_ptr), brw_imm_w(-2));
	 brw_MOV(p, get_addr_reg(vtxPrev), deref_1uw(outlist_ptr, 0));
	 brw_MOV(p, brw_vec8_grf(c->reg.inlist.nr, 0),
		 brw_vec8_grf(c->reg.outlist.nr, 0));
	 brw_MOV(p, get_addr_reg(inlist_ptr), brw_address(c->reg.inlist));
	 brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
      }
      brw_ENDIF(p, plane_active);

      /* plane_ptr++;
       */
      brw_ADD(p, get_addr_reg(plane_ptr),
	      get_addr_reg(plane_ptr), brw_clip_plane_stride(c));

      /* nr_verts >= 3
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_GE,
	      c->reg.nr_verts, brw_imm_ud(3));

      /* && (planemask>>=1) != 0
       */
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
   }
   brw_WHILE(p, plane_loop);
}
/* Emit a SIMD4x2 sampler message for a vec4 texture instruction.
 *
 * Selects the hardware message type per opcode/generation, optionally
 * builds an explicit message header (texel offsets, sampler-state pointer
 * adjustment), then emits either a direct brw_SAMPLE (immediate sampler
 * index) or an indirect send whose descriptor is assembled in a0.0
 * (variable sampler index).
 */
void
vec4_generator::generate_tex(vec4_instruction *inst,
                             struct brw_reg dst,
                             struct brw_reg src,
                             struct brw_reg sampler_index)
{
   int msg_type = -1;

   if (brw->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(brw->gen >= 8 || brw->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (brw->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(brw->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually.
          */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_present) {
      if (brw->gen < 6 && !inst->texture_offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->texture_offset) {
            /* Set the texel offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header, 2),
                    brw_imm_ud(inst->texture_offset));
         }

         brw_adjust_sampler_state_pointer(p, header, sampler_index, dst);
         brw_pop_insn_state(p);
      }
   }

   /* Sampler return format follows the destination register type. */
   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* Gather ops use a separate binding-table section. */
   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->base.binding_table.gather_texture_start
         : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.dw1.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_present,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base,
                            sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */
      /* Note: this clobbers `dst` as a temporary before emitting the send */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg temp = vec1(retype(dst, BRW_REGISTER_TYPE_UD));

      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* Some care required: `sampler` and `temp` may alias:
       *    addr = sampler & 0xff
       *    temp = (sampler << 8) & 0xf00
       *    addr = addr | temp
       */
      brw_ADD(p, addr, sampler_reg, brw_imm_ud(base_binding_table_index));
      brw_SHL(p, temp, sampler_reg, brw_imm_ud(8u));
      brw_AND(p, temp, temp, brw_imm_ud(0x0f00));
      brw_AND(p, addr, addr, brw_imm_ud(0x0ff));
      brw_OR(p, addr, addr, temp);

      /* a0.0 |= <descriptor>
       * (the OR's src1 carries the static part of the sampler message
       * descriptor; the dynamic surface/sampler bits come from addr)
       */
      brw_inst *insn_or = brw_next_insn(p, BRW_OPCODE_OR);
      brw_set_sampler_message(p, insn_or,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_present /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);
      brw_inst_set_exec_size(p->brw, insn_or, BRW_EXECUTE_1);
      brw_inst_set_src1_reg_type(p->brw, insn_or, BRW_REGISTER_TYPE_UD);
      brw_set_src0(p, insn_or, addr);
      brw_set_dest(p, insn_or, addr);

      /* dst = send(offset, a0.0) */
      brw_inst *insn_send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn_send, dst);
      brw_set_src0(p, insn_send, src);
      brw_set_indirect_send_descriptor(p, insn_send, BRW_SFID_SAMPLER, addr);

      brw_pop_insn_state(p);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}
/* Emit the SF unit's line setup program: for each attribute pair, compute
 * the interpolation coefficients (dA/dx, dA/dy, C0) along the line and
 * write the resulting m0..m3 payload to the URB.
 *
 * `allocate` controls whether registers are allocated here or by the
 * caller.  Predication gates which channels take the perspective and
 * linear paths.
 */
void brw_emit_line_setup(struct brw_sf_compile *c, bool allocate)
{
   struct brw_compile *p = &c->func;
   GLuint i;

   c->flag_value = 0xff;
   c->nr_verts = 2;

   if (allocate)
      alloc_regs(c);

   invert_det(c);
   copy_z_inv_w(c);

   if (c->has_flat_shading)
      do_flatshade_line(c);

   for (i = 0; i < c->nr_setup_regs; i++) {
      /* Pair of incoming attributes:
       */
      struct brw_reg a0 = offset(c->vert[0], i);
      struct brw_reg a1 = offset(c->vert[1], i);
      GLushort pc, pc_persp, pc_linear;
      bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);

      /* Perspective-correct attributes: pre-multiply by 1/w. */
      if (pc_persp) {
	 set_predicate_control_flag_value(p, c, pc_persp);
	 brw_MUL(p, a0, a0, c->inv_w[0]);
	 brw_MUL(p, a1, a1, c->inv_w[1]);
      }

      /* Calculate coefficients for position, color:
       */
      if (pc_linear) {
	 set_predicate_control_flag_value(p, c, pc_linear);

	 brw_ADD(p, c->a1_sub_a0, a1, negate(a0));

	 brw_MUL(p, c->tmp, c->a1_sub_a0, c->dx0);
	 brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);

	 brw_MUL(p, c->tmp, c->a1_sub_a0, c->dy0);
	 brw_MUL(p, c->m2Cy, c->tmp, c->inv_det);
      }

      {
	 set_predicate_control_flag_value(p, c, pc);

	 /* start point for interpolation
	  */
	 brw_MOV(p, c->m3C0, a0);

	 /* Copy m0..m3 to URB.
	  */
	 brw_urb_WRITE(p,
		       brw_null_reg(),
		       0,
		       brw_vec8_grf(0, 0),
		       last ? BRW_URB_WRITE_EOT_COMPLETE
		            : BRW_URB_WRITE_NO_FLAGS,
		       4, /* msg len */
		       0, /* response len */
		       i*4, /* urb destination offset */
		       BRW_URB_SWIZZLE_TRANSPOSE);
      }
   }

   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
}
/* Line clipping, more or less following the following algorithm:
 *
 *  for (p=0;p<MAX_PLANES;p++) {
 *     if (clipmask & (1 << p)) {
 *        GLfloat dp0 = DOTPROD( vtx0, plane[p] );
 *        GLfloat dp1 = DOTPROD( vtx1, plane[p] );
 *
 *        if (dp1 < 0.0f) {
 *           GLfloat t = dp1 / (dp1 - dp0);
 *           if (t > t1) t1 = t;
 *        } else {
 *           GLfloat t = dp0 / (dp0 - dp1);
 *           if (t > t0) t0 = t;
 *        }
 *
 *        if (t0 + t1 >= 1.0)
 *           return;
 *     }
 *  }
 *
 *  interp( ctx, newvtx0, vtx0, vtx1, t0 );
 *  interp( ctx, newvtx1, vtx1, vtx0, t1 );
 *
 * The plane loop handles both fixed planes (DP4 against the plane
 * equation) and user clip distances (a single float fetched per vertex,
 * selected via c->reg.vertex_src_mask / clipdistance_offset).
 */
static void clip_and_emit_line( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;
   struct brw_indirect vtx0     = brw_indirect(0, 0);
   struct brw_indirect vtx1     = brw_indirect(1, 0);
   struct brw_indirect newvtx0  = brw_indirect(2, 0);
   struct brw_indirect newvtx1  = brw_indirect(3, 0);
   struct brw_indirect plane_ptr = brw_indirect(4, 0);
   struct brw_reg v1_null_ud =
      retype(vec1(brw_null_reg()), BRW_REGISTER_TYPE_UD);
   GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
   GLint clipdist0_offset = c->key.nr_userclip
      ? brw_varying_to_offset(&c->vue_map, VARYING_SLOT_CLIP_DIST0)
      : 0;

   brw_MOV(p, get_addr_reg(vtx0),      brw_address(c->reg.vertex[0]));
   brw_MOV(p, get_addr_reg(vtx1),      brw_address(c->reg.vertex[1]));
   brw_MOV(p, get_addr_reg(newvtx0),   brw_address(c->reg.vertex[2]));
   brw_MOV(p, get_addr_reg(newvtx1),   brw_address(c->reg.vertex[3]));
   brw_MOV(p, get_addr_reg(plane_ptr), brw_clip_plane0_address(c));

   /* Note: init t0, t1 together:
    */
   brw_MOV(p, vec2(c->reg.t0), brw_imm_f(0));

   brw_clip_init_planes(c);
   brw_clip_init_clipmask(c);

   /* -ve rhw workaround */
   if (p->devinfo->has_negative_rhw_bug) {
      brw_AND(p, brw_null_reg(), get_element_ud(c->reg.R0, 2),
              brw_imm_ud(1<<20));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
      brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(0x3f));
      brw_inst_set_pred_control(p->devinfo, brw_last_inst, BRW_PREDICATE_NORMAL);
   }

   /* Set the initial vertex source mask: The first 6 planes are the bounds
    * of the view volume; the next 8 planes are the user clipping planes.
    */
   brw_MOV(p, c->reg.vertex_src_mask, brw_imm_ud(0x3fc0));

   /* Set the initial clipdistance offset to be 6 floats before gl_ClipDistance[0].
    * We'll increment 6 times before we start hitting actual user clipping.
    */
   brw_MOV(p, c->reg.clipdistance_offset,
           brw_imm_d(clipdist0_offset - 6*sizeof(float)));

   brw_DO(p, BRW_EXECUTE_1);
   {
      /* if (planemask & 1)
       */
      brw_AND(p, v1_null_ud, c->reg.planemask, brw_imm_ud(1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

      brw_IF(p, BRW_EXECUTE_1);
      {
         /* vertex_src_mask bit set => this plane is a user clip distance. */
         brw_AND(p, v1_null_ud, c->reg.vertex_src_mask, brw_imm_ud(1));
         brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

         brw_IF(p, BRW_EXECUTE_1);
         {
            /* user clip distance: just fetch the correct float from each vertex */
            struct brw_indirect temp_ptr = brw_indirect(7, 0);
            brw_ADD(p, get_addr_reg(temp_ptr), get_addr_reg(vtx0),
                    c->reg.clipdistance_offset);
            brw_MOV(p, c->reg.dp0, deref_1f(temp_ptr, 0));
            brw_ADD(p, get_addr_reg(temp_ptr), get_addr_reg(vtx1),
                    c->reg.clipdistance_offset);
            brw_MOV(p, c->reg.dp1, deref_1f(temp_ptr, 0));
         }
         brw_ELSE(p);
         {
            /* fixed plane: fetch the hpos, dp4 against the plane. */
            if (c->key.nr_userclip)
               brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
            else
               brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));

            brw_DP4(p, vec4(c->reg.dp0),
                    deref_4f(vtx0, hpos_offset), c->reg.plane_equation);
            brw_DP4(p, vec4(c->reg.dp1),
                    deref_4f(vtx1, hpos_offset), c->reg.plane_equation);
         }
         brw_ENDIF(p);

         brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L,
                 vec1(c->reg.dp1), brw_imm_f(0.0f));

         brw_IF(p, BRW_EXECUTE_1);
         {
            /*
             * Both can be negative on GM965/G965 due to RHW workaround
             * if so, this object should be rejected.
             */
            if (p->devinfo->has_negative_rhw_bug) {
               brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_LE,
                       c->reg.dp0, brw_imm_f(0.0));
               brw_IF(p, BRW_EXECUTE_1);
               {
                  brw_clip_kill_thread(c);
               }
               brw_ENDIF(p);
            }

            /* t = dp1 / (dp1 - dp0); t1 = max(t1, t) */
            brw_ADD(p, c->reg.t, c->reg.dp1, negate(c->reg.dp0));
            brw_math_invert(p, c->reg.t, c->reg.t);
            brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp1);

            brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G,
                    c->reg.t, c->reg.t1 );
            brw_MOV(p, c->reg.t1, c->reg.t);
            brw_inst_set_pred_control(p->devinfo, brw_last_inst,
                                      BRW_PREDICATE_NORMAL);
         }
         brw_ELSE(p);
         {
            /* Coming back in.  We know that both cannot be negative
             * because the line would have been culled in that case.
             */

            /* If both are positive, do nothing */
            /* Only on GM965/G965 */
            if (p->devinfo->has_negative_rhw_bug) {
               brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L,
                       c->reg.dp0, brw_imm_f(0.0));
               brw_IF(p, BRW_EXECUTE_1);
            }
            {
               /* t = dp0 / (dp0 - dp1); t0 = max(t0, t) */
               brw_ADD(p, c->reg.t, c->reg.dp0, negate(c->reg.dp1));
               brw_math_invert(p, c->reg.t, c->reg.t);
               brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp0);

               brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G,
                       c->reg.t, c->reg.t0 );
               brw_MOV(p, c->reg.t0, c->reg.t);
               brw_inst_set_pred_control(p->devinfo, brw_last_inst,
                                         BRW_PREDICATE_NORMAL);
            }
            if (p->devinfo->has_negative_rhw_bug) {
               brw_ENDIF(p);
            }
         }
         brw_ENDIF(p);
      }
      brw_ENDIF(p);

      /* plane_ptr++;
       */
      brw_ADD(p, get_addr_reg(plane_ptr), get_addr_reg(plane_ptr),
              brw_clip_plane_stride(c));

      /* while (planemask>>=1) != 0
       * (the two predicated ops keep vertex_src_mask and the clip-distance
       * offset in step with the plane loop)
       */
      brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
      brw_SHR(p, c->reg.vertex_src_mask, c->reg.vertex_src_mask, brw_imm_ud(1));
      brw_inst_set_pred_control(p->devinfo, brw_last_inst, BRW_PREDICATE_NORMAL);
      brw_ADD(p, c->reg.clipdistance_offset, c->reg.clipdistance_offset,
              brw_imm_w(sizeof(float)));
      brw_inst_set_pred_control(p->devinfo, brw_last_inst, BRW_PREDICATE_NORMAL);
   }
   brw_WHILE(p);
   brw_inst_set_pred_control(p->devinfo, brw_last_inst, BRW_PREDICATE_NORMAL);

   /* Emit the clipped segment only if t0 + t1 < 1 (otherwise fully clipped). */
   brw_ADD(p, c->reg.t, c->reg.t0, c->reg.t1);
   brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L,
           c->reg.t, brw_imm_f(1.0));
   brw_IF(p, BRW_EXECUTE_1);
   {
      brw_clip_interp_vertex(c, newvtx0, vtx0, vtx1, c->reg.t0, false);
      brw_clip_interp_vertex(c, newvtx1, vtx1, vtx0, c->reg.t1, false);

      brw_clip_emit_vue(c, newvtx0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                        (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
                        | URB_WRITE_PRIM_START);
      brw_clip_emit_vue(c, newvtx1, BRW_URB_WRITE_EOT_COMPLETE,
                        (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
                        | URB_WRITE_PRIM_END);
   }
   brw_ENDIF(p);
   brw_clip_kill_thread(c);
}
/**
 * Generate the geometry shader program used on Gen6 to perform stream output
 * (transform feedback).
 *
 * The generated program copies each incoming vertex's selected varyings into
 * the bound transform-feedback buffers via SVB writes, then re-emits the
 * primitive's vertices to the rest of the pipeline (unless rasterizer
 * discard is enabled, in which case the thread terminates after the writes).
 */
void
gen6_sol_program(struct brw_gs_compile *c, struct brw_gs_prog_key *key,
                 unsigned num_verts, bool check_edge_flags)
{
   struct brw_compile *p = &c->func;

   /* The hardware advances SVBI 0 by this amount after each primitive. */
   c->prog_data.svbi_postincrement_value = num_verts;

   brw_gs_alloc_regs(c, num_verts, true);
   brw_gs_initialize_header(c);

   if (key->num_transform_feedback_bindings > 0) {
      unsigned vertex, binding;
      struct brw_reg destination_indices_uw =
         vec8(retype(c->reg.destination_indices, BRW_REGISTER_TYPE_UW));

      /* Note: since we use the binding table to keep track of buffer offsets
       * and stride, the GS doesn't need to keep track of a separate pointer
       * into each buffer; it uses a single pointer which increments by 1 for
       * each vertex.  So we use SVBI0 for this pointer, regardless of whether
       * transform feedback is in interleaved or separate attribs mode.
       *
       * Make sure that the buffers have enough room for all the vertices.
       */
      brw_ADD(p, get_element_ud(c->reg.temp, 0),
              get_element_ud(c->reg.SVBI, 0), brw_imm_ud(num_verts));
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_LE,
              get_element_ud(c->reg.temp, 0),
              get_element_ud(c->reg.SVBI, 4));
      brw_IF(p, BRW_EXECUTE_1);

      /* Compute the destination indices to write to.  Usually we use SVBI[0]
       * + (0, 1, 2).  However, for odd-numbered triangles in tristrips, the
       * vertices come down the pipeline in reversed winding order, so we need
       * to flip the order when writing to the transform feedback buffer.  To
       * ensure that flatshading accuracy is preserved, we need to write them
       * in order SVBI[0] + (0, 2, 1) if we're using the first provoking
       * vertex convention, and in order SVBI[0] + (1, 0, 2) if we're using
       * the last provoking vertex convention.
       *
       * Note: since brw_imm_v can only be used in instructions in
       * packed-word execution mode, and SVBI is a double-word, we need to
       * first move the appropriate immediate constant ((0, 1, 2), (0, 2, 1),
       * or (1, 0, 2)) to the destination_indices register, and then add SVBI
       * using a separate instruction.  Also, since the immediate constant is
       * expressed as packed words, and we need to load double-words into
       * destination_indices, we need to intersperse zeros to fill the upper
       * halves of each double-word.
       */
      brw_MOV(p, destination_indices_uw,
              brw_imm_v(0x00020100)); /* (0, 1, 2) */
      if (num_verts == 3) {
         /* Get primitive type into temp register. */
         brw_AND(p, get_element_ud(c->reg.temp, 0),
                 get_element_ud(c->reg.R0, 2), brw_imm_ud(0x1f));

         /* Test if primitive type is TRISTRIP_REVERSE.  We need to do this as
          * an 8-wide comparison so that the conditional MOV that follows
          * moves all 8 words correctly.
          */
         brw_CMP(p, vec8(brw_null_reg()), BRW_CONDITIONAL_EQ,
                 get_element_ud(c->reg.temp, 0),
                 brw_imm_ud(_3DPRIM_TRISTRIP_REVERSE));

         /* If so, then overwrite destination_indices_uw with the appropriate
          * reordering.
          *
          * NOTE(review): no explicit predicate is set before this MOV; the
          * PREDICATE_NONE reset afterwards suggests it is expected to be
          * predicated on the preceding CMP's flag result via the compiler's
          * current instruction state — confirm against brw_eu state handling.
          */
         brw_MOV(p, destination_indices_uw,
                 brw_imm_v(key->pv_first ? 0x00010200    /* (0, 2, 1) */
                                         : 0x00020001)); /* (1, 0, 2) */
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }
      brw_ADD(p, c->reg.destination_indices,
              c->reg.destination_indices, get_element_ud(c->reg.SVBI, 0));

      /* For each vertex, generate code to output each varying using the
       * appropriate binding table entry.
       */
      for (vertex = 0; vertex < num_verts; ++vertex) {
         /* Set up the correct destination index for this vertex */
         brw_MOV(p, get_element_ud(c->reg.header, 5),
                 get_element_ud(c->reg.destination_indices, vertex));

         for (binding = 0; binding < key->num_transform_feedback_bindings;
              ++binding) {
            unsigned char varying =
               key->transform_feedback_bindings[binding];
            unsigned char slot = c->vue_map.varying_to_slot[varying];
            /* From the Sandybridge PRM, Volume 2, Part 1, Section 4.5.1:
             *
             *   "Prior to End of Thread with a URB_WRITE, the kernel must
             *   ensure that all writes are complete by sending the final
             *   write as a committed write."
             */
            bool final_write =
               binding == key->num_transform_feedback_bindings - 1 &&
               vertex == num_verts - 1;
            struct brw_reg vertex_slot = c->reg.vertex[vertex];

            /* Each VUE register holds two slots; select the half that
             * contains this varying.
             */
            vertex_slot.nr += slot / 2;
            vertex_slot.subnr = (slot % 2) * 16;
            /* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
            vertex_slot.dw1.bits.swizzle = varying == VARYING_SLOT_PSIZ
               ? BRW_SWIZZLE_WWWW : key->transform_feedback_swizzles[binding];
            brw_set_access_mode(p, BRW_ALIGN_16);
            brw_MOV(p, stride(c->reg.header, 4, 4, 1),
                    retype(vertex_slot, BRW_REGISTER_TYPE_UD));
            brw_set_access_mode(p, BRW_ALIGN_1);
            brw_svb_write(p,
                          final_write ? c->reg.temp : brw_null_reg(), /* dest */
                          1, /* msg_reg_nr */
                          c->reg.header, /* src0 */
                          SURF_INDEX_SOL_BINDING(binding), /* binding_table_index */
                          final_write); /* send_commit_msg */
         }
      }
      brw_ENDIF(p);

      /* Now, reinitialize the header register from R0 to restore the parts of
       * the register that we overwrote while streaming out transform feedback
       * data.
       */
      brw_gs_initialize_header(c);

      /* Finally, wait for the write commit to occur so that we can proceed to
       * other things safely.
       *
       * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
       *
       *   The write commit does not modify the destination register, but
       *   merely clears the dependency associated with the destination
       *   register. Thus, a simple "mov" instruction using the register as a
       *   source is sufficient to wait for the write commit to occur.
       */
      brw_MOV(p, c->reg.temp, c->reg.temp);
   }

   brw_gs_ff_sync(c, 1);

   /* If RASTERIZER_DISCARD is enabled, we have nothing further to do, so
    * release the URB that was just allocated, and terminate the thread.
    */
   if (key->rasterizer_discard) {
      brw_gs_terminate(c);
      return;
   }

   brw_gs_overwrite_header_dw2_from_r0(c);
   switch (num_verts) {
   case 1:
      brw_gs_offset_header_dw2(c,
                               URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
      brw_gs_emit_vue(c, c->reg.vertex[0], true);
      break;
   case 2:
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[0], false);
      brw_gs_offset_header_dw2(c,
                               URB_WRITE_PRIM_END - URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[1], true);
      break;
   case 3:
      if (check_edge_flags) {
         /* Only emit vertices 0 and 1 if this is the first triangle of the
          * polygon.  Otherwise they are redundant.
          */
         brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
         brw_AND(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 get_element_ud(c->reg.R0, 2),
                 brw_imm_ud(BRW_GS_EDGE_INDICATOR_0));
         brw_IF(p, BRW_EXECUTE_1);
      }
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[0], false);
      brw_gs_offset_header_dw2(c, -URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[1], false);
      if (check_edge_flags) {
         brw_ENDIF(p);

         /* Only emit vertex 2 in PRIM_END mode if this is the last triangle
          * of the polygon.  Otherwise leave the primitive incomplete because
          * there are more polygon vertices coming.
          */
         brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
         brw_AND(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 get_element_ud(c->reg.R0, 2),
                 brw_imm_ud(BRW_GS_EDGE_INDICATOR_1));
         brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
      }
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_END);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      brw_gs_emit_vue(c, c->reg.vertex[2], true);
      break;
   }
}
static void brw_wm_emit_glsl(struct brw_context *brw, struct brw_wm_compile *c) { #define MAX_IFSN 32 #define MAX_LOOP_DEPTH 32 struct brw_instruction *if_inst[MAX_IFSN], *loop_inst[MAX_LOOP_DEPTH]; struct brw_instruction *inst0, *inst1; int i, if_insn = 0, loop_insn = 0; struct brw_compile *p = &c->func; struct brw_indirect stack_index = brw_indirect(0, 0); c->reg_index = 0; prealloc_reg(c); brw_set_compression_control(p, BRW_COMPRESSION_NONE); brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack)); for (i = 0; i < c->nr_fp_insns; i++) { struct prog_instruction *inst = &c->prog_instructions[i]; struct prog_instruction *orig_inst; if ((orig_inst = inst->Data) != 0) orig_inst->Data = current_insn(p); if (inst->CondUpdate) brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ); else brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE); switch (inst->Opcode) { case WM_PIXELXY: emit_pixel_xy(c, inst); break; case WM_DELTAXY: emit_delta_xy(c, inst); break; case WM_PIXELW: emit_pixel_w(c, inst); break; case WM_LINTERP: emit_linterp(c, inst); break; case WM_PINTERP: emit_pinterp(c, inst); break; case WM_CINTERP: emit_cinterp(c, inst); break; case WM_WPOSXY: emit_wpos_xy(c, inst); break; case WM_FB_WRITE: emit_fb_write(c, inst); break; case OPCODE_ABS: emit_abs(c, inst); break; case OPCODE_ADD: emit_add(c, inst); break; case OPCODE_SUB: emit_sub(c, inst); break; case OPCODE_FRC: emit_frc(c, inst); break; case OPCODE_FLR: emit_flr(c, inst); break; case OPCODE_LRP: emit_lrp(c, inst); break; case OPCODE_INT: emit_int(c, inst); break; case OPCODE_MOV: emit_mov(c, inst); break; case OPCODE_DP3: emit_dp3(c, inst); break; case OPCODE_DP4: emit_dp4(c, inst); break; case OPCODE_XPD: emit_xpd(c, inst); break; case OPCODE_DPH: emit_dph(c, inst); break; case OPCODE_RCP: emit_rcp(c, inst); break; case OPCODE_RSQ: emit_rsq(c, inst); break; case OPCODE_SIN: emit_sin(c, inst); break; case OPCODE_COS: emit_cos(c, inst); break; case OPCODE_EX2: emit_ex2(c, inst); break; case OPCODE_LG2: emit_lg2(c, 
inst); break; case OPCODE_MAX: emit_max(c, inst); break; case OPCODE_MIN: emit_min(c, inst); break; case OPCODE_DDX: emit_ddx(c, inst); break; case OPCODE_DDY: emit_ddy(c, inst); break; case OPCODE_SLT: emit_slt(c, inst); break; case OPCODE_SLE: emit_sle(c, inst); break; case OPCODE_SGT: emit_sgt(c, inst); break; case OPCODE_SGE: emit_sge(c, inst); break; case OPCODE_SEQ: emit_seq(c, inst); break; case OPCODE_SNE: emit_sne(c, inst); break; case OPCODE_MUL: emit_mul(c, inst); break; case OPCODE_POW: emit_pow(c, inst); break; case OPCODE_MAD: emit_mad(c, inst); break; case OPCODE_TEX: emit_tex(c, inst); break; case OPCODE_TXB: emit_txb(c, inst); break; case OPCODE_KIL_NV: emit_kil(c); break; case OPCODE_IF: assert(if_insn < MAX_IFSN); if_inst[if_insn++] = brw_IF(p, BRW_EXECUTE_8); break; case OPCODE_ELSE: if_inst[if_insn-1] = brw_ELSE(p, if_inst[if_insn-1]); break; case OPCODE_ENDIF: assert(if_insn > 0); brw_ENDIF(p, if_inst[--if_insn]); break; case OPCODE_BGNSUB: case OPCODE_ENDSUB: break; case OPCODE_CAL: brw_push_insn_state(p); brw_set_mask_control(p, BRW_MASK_DISABLE); brw_set_access_mode(p, BRW_ALIGN_1); brw_ADD(p, deref_1ud(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16)); brw_set_access_mode(p, BRW_ALIGN_16); brw_ADD(p, get_addr_reg(stack_index), get_addr_reg(stack_index), brw_imm_d(4)); orig_inst = inst->Data; orig_inst->Data = &p->store[p->nr_insn]; brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16)); brw_pop_insn_state(p); break; case OPCODE_RET: brw_push_insn_state(p); brw_set_mask_control(p, BRW_MASK_DISABLE); brw_ADD(p, get_addr_reg(stack_index), get_addr_reg(stack_index), brw_imm_d(-4)); brw_set_access_mode(p, BRW_ALIGN_1); brw_MOV(p, brw_ip_reg(), deref_1ud(stack_index, 0)); brw_set_access_mode(p, BRW_ALIGN_16); brw_pop_insn_state(p); break; case OPCODE_BGNLOOP: loop_inst[loop_insn++] = brw_DO(p, BRW_EXECUTE_8); break; case OPCODE_BRK: brw_BREAK(p); brw_set_predicate_control(p, BRW_PREDICATE_NONE); break; case OPCODE_CONT: brw_CONT(p); 
brw_set_predicate_control(p, BRW_PREDICATE_NONE); break; case OPCODE_ENDLOOP: loop_insn--; inst0 = inst1 = brw_WHILE(p, loop_inst[loop_insn]); /* patch all the BREAK instructions from last BEGINLOOP */ while (inst0 > loop_inst[loop_insn]) { inst0--; if (inst0->header.opcode == BRW_OPCODE_BREAK) { inst0->bits3.if_else.jump_count = inst1 - inst0 + 1; inst0->bits3.if_else.pop_count = 0; } else if (inst0->header.opcode == BRW_OPCODE_CONTINUE) { inst0->bits3.if_else.jump_count = inst1 - inst0; inst0->bits3.if_else.pop_count = 0; } } break; default: _mesa_printf("unsupported IR in fragment shader %d\n", inst->Opcode); } if (inst->CondUpdate) brw_set_predicate_control(p, BRW_PREDICATE_NORMAL); else brw_set_predicate_control(p, BRW_PREDICATE_NONE); } post_wm_emit(c); for (i = 0; i < c->fp->program.Base.NumInstructions; i++) c->fp->program.Base.Instructions[i].Data = NULL; }