Example #1
static void do_twoside_color( struct brw_sf_compile *c )
{
   struct brw_compile *p = &c->func;
   GLuint backface_conditional = c->key.frontface_ccw ? BRW_CONDITIONAL_G : BRW_CONDITIONAL_L;

   /* Already done in clip program:
    */
   if (c->key.primitive == SF_UNFILLED_TRIS)
      return;

   /* If the vertex shader provides backface color, do the selection. The VS
    * promises to set up the front color if the backface color is provided, but
    * it may contain junk if never written to.
    */
   if (!(have_attr(c, VARYING_SLOT_COL0) && have_attr(c, VARYING_SLOT_BFC0)) &&
       !(have_attr(c, VARYING_SLOT_COL1) && have_attr(c, VARYING_SLOT_BFC1)))
      return;

   /* Need to use BRW_EXECUTE_4 and also do a 4-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so that isn't an issue and we can use
    * BRW_EXECUTE_1 for all comparisons.
    */
   brw_CMP(p, vec4(brw_null_reg()), backface_conditional, c->det, brw_imm_f(0));
   brw_IF(p, BRW_EXECUTE_4);
   {
      switch (c->nr_verts) {
      case 3: copy_bfc(c, c->vert[2]); /* fall through */
      case 2: copy_bfc(c, c->vert[1]); /* fall through */
      case 1: copy_bfc(c, c->vert[0]);
      }
   }
   brw_ENDIF(p);
}
Example #2
static void merge_edgeflags( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp0 = get_element_ud(c->reg.tmp0, 0);

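   /* Extract the primitive type from the payload header (R0.2) and check
    * whether this primitive is a polygon:
    */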
   brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK)); 
   brw_CMP(p, 
	   vec1(brw_null_reg()), 
	   BRW_CONDITIONAL_EQ, 
	   tmp0,
	   brw_imm_ud(_3DPRIM_POLYGON));

   /* Get away with using reg.vertex because we know that this is not
    * a _3DPRIM_TRISTRIP_REVERSE:
    */
   brw_IF(p, BRW_EXECUTE_1);
   {   
      brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
      brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<8));
      brw_MOV(p, byte_offset(c->reg.vertex[0],
                             brw_varying_to_offset(&c->vue_map,
                                                   VARYING_SLOT_EDGE)),
              brw_imm_f(0));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
      brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<9));
      brw_MOV(p, byte_offset(c->reg.vertex[2],
                             brw_varying_to_offset(&c->vue_map,
                                                   VARYING_SLOT_EDGE)),
              brw_imm_f(0));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   }
   brw_ENDIF(p);
}
Example #3
static void emit_unfilled_primitives( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;

   /* Direction culling has already been done.
    */
   if (c->key.fill_ccw != c->key.fill_cw &&
       c->key.fill_ccw != CLIP_CULL &&
       c->key.fill_cw != CLIP_CULL)
   {
      brw_CMP(p,
	      vec1(brw_null_reg()),
	      BRW_CONDITIONAL_GE,
	      get_element(c->reg.dir, 2),
	      brw_imm_f(0));

      brw_IF(p, BRW_EXECUTE_1);
      {
	 emit_primitives(c, c->key.fill_ccw, c->key.offset_ccw);
      }
      brw_ELSE(p);
      {
	 emit_primitives(c, c->key.fill_cw, c->key.offset_cw);
      }
      brw_ENDIF(p);
   }
   else if (c->key.fill_cw != CLIP_CULL) {
      emit_primitives(c, c->key.fill_cw, c->key.offset_cw);
   }
   else if (c->key.fill_ccw != CLIP_CULL) {
      emit_primitives(c, c->key.fill_ccw, c->key.offset_ccw);
   }
}
Example #4
static void cull_direction( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   GLuint conditional;

   assert (!(c->key.fill_ccw == CLIP_CULL &&
	     c->key.fill_cw == CLIP_CULL));

   if (c->key.fill_ccw == CLIP_CULL)
      conditional = BRW_CONDITIONAL_GE;
   else
      conditional = BRW_CONDITIONAL_L;

   brw_CMP(p,
	   vec1(brw_null_reg()),
	   conditional,
	   get_element(c->reg.dir, 2),
	   brw_imm_f(0));

   brw_IF(p, BRW_EXECUTE_1);
   {
      brw_clip_kill_thread(c);
   }
   brw_ENDIF(p);
}
Example #5
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
    if (c->need_ff_sync) {
        struct brw_compile *p = &c->func;
        struct brw_instruction *need_ff_sync;

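        /* Send the FF_SYNC message at most once per thread: bit 0 of
         * reg.ff_sync records whether it has already been sent.
         */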
        brw_set_conditionalmod(p, BRW_CONDITIONAL_Z);
        brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
        need_ff_sync = brw_IF(p, BRW_EXECUTE_1);
        {
            brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
            brw_ff_sync(p, 
                    c->reg.R0,
                    0,
                    c->reg.R0,
                    1,		/* allocate */
                    1,		/* used */
                    1,		/* msg length */
                    1,		/* response length */
                    0,		/* eot */
                    1,		/* write complete */
                    0,		/* urb offset */
                    BRW_URB_SWIZZLE_NONE);
        }
        brw_ENDIF(p, need_ff_sync);
        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    }
}
Example #6
void brw_emit_unfilled_clip( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;

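   /* The triangle direction (sign of the area) is needed whenever culling,
    * polygon offset, backface-color copying, or differing fill modes depend
    * on which way the triangle faces:
    */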
   c->need_direction = ((c->key.offset_ccw || c->key.offset_cw) ||
			(c->key.fill_ccw != c->key.fill_cw) ||
			c->key.fill_ccw == CLIP_CULL ||
			c->key.fill_cw == CLIP_CULL ||
			c->key.copy_bfc_cw ||
			c->key.copy_bfc_ccw);

   brw_clip_tri_alloc_regs(c, 3 + c->key.nr_userclip + 6);
   brw_clip_tri_init_vertices(c);
   brw_clip_init_ff_sync(c);

   assert(brw_clip_have_varying(c, VARYING_SLOT_EDGE));

   if (c->key.fill_ccw == CLIP_CULL &&
       c->key.fill_cw == CLIP_CULL) {
      brw_clip_kill_thread(c);
      return;
   }

   merge_edgeflags(c);

   /* Need to use the inlist indirection here:
    */
   if (c->need_direction)
      compute_tri_direction(c);

   if (c->key.fill_ccw == CLIP_CULL ||
       c->key.fill_cw == CLIP_CULL)
      cull_direction(c);

   if (c->key.offset_ccw ||
       c->key.offset_cw)
      compute_offset(c);

   if (c->key.copy_bfc_ccw ||
       c->key.copy_bfc_cw)
      copy_bfc(c);

   /* Need to do this whether we clip or not:
    */
   if (c->has_flat_shading)
      brw_clip_tri_flat_shade(c);

   brw_clip_init_clipmask(c);
   brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, c->reg.planemask, brw_imm_ud(0));
   brw_IF(p, BRW_EXECUTE_1);
   {
      brw_clip_init_planes(c);
      brw_clip_tri(c);
      check_nr_verts(c);
   }
   brw_ENDIF(p);

   emit_unfilled_primitives(c);
   brw_clip_kill_thread(c);
}
Example #7
void brw_clip_tri_flat_shade( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *is_poly;
   struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */

   brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK)); 
   brw_CMP(p, 
	   vec1(brw_null_reg()), 
	   BRW_CONDITIONAL_EQ, 
	   tmp0,
	   brw_imm_ud(_3DPRIM_POLYGON));

   is_poly = brw_IF(p, BRW_EXECUTE_1);
   {   
      brw_clip_copy_colors(c, 1, 0);
      brw_clip_copy_colors(c, 2, 0);
   }
   is_poly = brw_ELSE(p, is_poly);
   {
      brw_clip_copy_colors(c, 0, 2);
      brw_clip_copy_colors(c, 1, 2);
   }
   brw_ENDIF(p, is_poly);
}
Example #8
void brw_clip_tri_flat_shade( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;
   struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */

   brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK));
   brw_CMP(p,
	   vec1(brw_null_reg()),
	   BRW_CONDITIONAL_EQ,
	   tmp0,
	   brw_imm_ud(_3DPRIM_POLYGON));

   brw_IF(p, BRW_EXECUTE_1);
   {
      brw_clip_copy_flatshaded_attributes(c, 1, 0);
      brw_clip_copy_flatshaded_attributes(c, 2, 0);
   }
   brw_ELSE(p);
   {
      if (c->key.pv_first) {
	 brw_CMP(p,
		 vec1(brw_null_reg()),
		 BRW_CONDITIONAL_EQ,
		 tmp0,
		 brw_imm_ud(_3DPRIM_TRIFAN));
	 brw_IF(p, BRW_EXECUTE_1);
	 {
	    brw_clip_copy_flatshaded_attributes(c, 0, 1);
	    brw_clip_copy_flatshaded_attributes(c, 2, 1);
	 }
	 brw_ELSE(p);
	 {
	    brw_clip_copy_flatshaded_attributes(c, 1, 0);
	    brw_clip_copy_flatshaded_attributes(c, 2, 0);
	 }
	 brw_ENDIF(p);
      }
      else {
         brw_clip_copy_flatshaded_attributes(c, 0, 2);
         brw_clip_copy_flatshaded_attributes(c, 1, 2);
      }
   }
   brw_ENDIF(p);
}
Example #9
static void copy_bfc( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   GLuint conditional;

   /* Do we have any colors to copy?
    */
   if (!(brw_clip_have_varying(c, VARYING_SLOT_COL0) &&
         brw_clip_have_varying(c, VARYING_SLOT_BFC0)) &&
       !(brw_clip_have_varying(c, VARYING_SLOT_COL1) &&
         brw_clip_have_varying(c, VARYING_SLOT_BFC1)))
      return;

   /* In some weird degenerate cases we can end up testing the
    * direction twice, once for culling and once for bfc copying.  Oh
    * well, that's what you get for setting weird GL state.
    */
   if (c->key.copy_bfc_ccw)
      conditional = BRW_CONDITIONAL_GE;
   else
      conditional = BRW_CONDITIONAL_L;

   brw_CMP(p,
	   vec1(brw_null_reg()),
	   conditional,
	   get_element(c->reg.dir, 2),
	   brw_imm_f(0));

   brw_IF(p, BRW_EXECUTE_1);
   {
      GLuint i;

      for (i = 0; i < 3; i++) {
	 if (brw_clip_have_varying(c, VARYING_SLOT_COL0) &&
             brw_clip_have_varying(c, VARYING_SLOT_BFC0))
	    brw_MOV(p,
		    byte_offset(c->reg.vertex[i],
                                brw_varying_to_offset(&c->vue_map,
                                                      VARYING_SLOT_COL0)),
		    byte_offset(c->reg.vertex[i],
                                brw_varying_to_offset(&c->vue_map,
                                                      VARYING_SLOT_BFC0)));

	 if (brw_clip_have_varying(c, VARYING_SLOT_COL1) &&
             brw_clip_have_varying(c, VARYING_SLOT_BFC1))
	    brw_MOV(p,
		    byte_offset(c->reg.vertex[i],
                                brw_varying_to_offset(&c->vue_map,
                                                      VARYING_SLOT_COL1)),
		    byte_offset(c->reg.vertex[i],
                                brw_varying_to_offset(&c->vue_map,
                                                      VARYING_SLOT_BFC1)));
      }
   }
   brw_ENDIF(p);
}
Example #10
static void check_nr_verts( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;

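   /* If clipping left fewer than three vertices there is nothing to draw,
    * so kill the thread:
    */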
   brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.nr_verts, brw_imm_d(3));
   brw_IF(p, BRW_EXECUTE_1);
   {
      brw_clip_kill_thread(c);
   }
   brw_ENDIF(p);
}
Example #11
static void maybe_do_clip_tri( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;

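   /* Only run the clipping loop if at least one plane is still enabled in
    * planemask:
    */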
   brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, c->reg.planemask, brw_imm_ud(0));
   brw_IF(p, BRW_EXECUTE_1);
   {
      do_clip_tri(c);
   }
   brw_ENDIF(p);
}
Example #12
void brw_clip_tri_emit_polygon(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   /* for (loopcount = nr_verts-2; loopcount > 0; loopcount--)
    */
   brw_ADD(p,
	   c->reg.loopcount,
	   c->reg.nr_verts,
	   brw_imm_d(-2));
   brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_G);

   brw_IF(p, BRW_EXECUTE_1);
   {
      struct brw_indirect v0 = brw_indirect(0, 0);
      struct brw_indirect vptr = brw_indirect(1, 0);

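      /* Walk the list of clipped vertices (inlist) and emit them as a
       * triangle fan:
       */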
      brw_MOV(p, get_addr_reg(vptr), brw_address(c->reg.inlist));
      brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

      brw_clip_emit_vue(c, v0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                        ((_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT)
                         | URB_WRITE_PRIM_START));

      brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
      brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

      brw_DO(p, BRW_EXECUTE_1);
      {
	 brw_clip_emit_vue(c, v0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                           (_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT));

	 brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
	 brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

	 brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
         brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
      }
      brw_WHILE(p);
      brw_inst_set_pred_control(p->devinfo, brw_last_inst, BRW_PREDICATE_NORMAL);

      brw_clip_emit_vue(c, v0, BRW_URB_WRITE_EOT_COMPLETE,
                        ((_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT)
                         | URB_WRITE_PRIM_END));
   }
   brw_ENDIF(p);
}
Example #13
void brw_clip_tri_emit_polygon(struct brw_clip_compile *c)
{
   struct brw_compile *p = &c->func;

   /* for (loopcount = nr_verts-2; loopcount > 0; loopcount--)
    */
   brw_set_conditionalmod(p, BRW_CONDITIONAL_G);
   brw_ADD(p,
	   c->reg.loopcount,
	   c->reg.nr_verts,
	   brw_imm_d(-2));

   brw_IF(p, BRW_EXECUTE_1);
   {
      struct brw_indirect v0 = brw_indirect(0, 0);
      struct brw_indirect vptr = brw_indirect(1, 0);

      brw_MOV(p, get_addr_reg(vptr), brw_address(c->reg.inlist));
      brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

      brw_clip_emit_vue(c, v0, 1, 0,
                        ((_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT)
                         | URB_WRITE_PRIM_START));
      
      brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
      brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

      brw_DO(p, BRW_EXECUTE_1);
      {
	 brw_clip_emit_vue(c, v0, 1, 0,
                           (_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT));
  
	 brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
	 brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));

	 brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
	 brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
      }
      brw_WHILE(p);

      brw_clip_emit_vue(c, v0, 0, 1,
                        ((_3DPRIM_TRIFAN << URB_WRITE_PRIM_TYPE_SHIFT)
                         | URB_WRITE_PRIM_END));
   }
   brw_ENDIF(p);
}
Example #14
static void copy_bfc( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *ccw;
   GLuint conditional;

   /* Do we have any colors to copy? 
    */
   if (!(c->offset[VERT_RESULT_COL0] && c->offset[VERT_RESULT_BFC0]) &&
       !(c->offset[VERT_RESULT_COL1] && c->offset[VERT_RESULT_BFC1]))
      return;

   /* In some weird degenerate cases we can end up testing the
    * direction twice, once for culling and once for bfc copying.  Oh
    * well, that's what you get for setting weird GL state.
    */
   if (c->key.copy_bfc_ccw)
      conditional = BRW_CONDITIONAL_GE;
   else
      conditional = BRW_CONDITIONAL_L;

   brw_CMP(p,
	   vec1(brw_null_reg()),
	   conditional,
	   get_element(c->reg.dir, 2),
	   brw_imm_f(0));
   
   ccw = brw_IF(p, BRW_EXECUTE_1);
   {
      GLuint i;

      for (i = 0; i < 3; i++) {
	 if (c->offset[VERT_RESULT_COL0] && c->offset[VERT_RESULT_BFC0])
	    brw_MOV(p, 
		    byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_COL0]),
		    byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_BFC0]));

	 if (c->offset[VERT_RESULT_COL1] && c->offset[VERT_RESULT_BFC1])
	    brw_MOV(p, 
		    byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_COL1]),
		    byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_BFC1]));
      }
   }
   brw_ENDIF(p, ccw);
}
Example #15
static void emit_points(struct brw_clip_compile *c,
			bool do_offset )
{
   struct brw_compile *p = &c->func;
   const struct brw_context *brw = p->brw;

   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);

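   /* Loop over the clipped vertices, emitting each one whose edge flag is
    * set as a point:
    */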
   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

   brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw if edgeflag != 0
       */
      brw_CMP(p,
	      vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
	      deref_1f(v0, brw_varying_to_offset(&c->vue_map,
                                                 VARYING_SLOT_EDGE)),
	      brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      {
	 if (do_offset)
	    apply_one_offset(c, v0);

	 brw_clip_emit_vue(c, v0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                           (_3DPRIM_POINTLIST << URB_WRITE_PRIM_TYPE_SHIFT)
                           | URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
      }
      brw_ENDIF(p);

      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
      brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_NZ);
   }
   brw_WHILE(p);
   brw_inst_set_pred_control(brw, brw_last_inst, BRW_PREDICATE_NORMAL);
}
Example #16
void brw_emit_tri_clip( struct brw_clip_compile *c )
{
   struct brw_instruction *neg_rhw;
   struct brw_compile *p = &c->func;
   brw_clip_tri_alloc_regs(c, 3 + c->key.nr_userclip + 6);
   brw_clip_tri_init_vertices(c);
   brw_clip_init_clipmask(c);
   brw_clip_init_ff_sync(c);

   /* If the negative RHW workaround bit is set, do the clip test:
    */
   if (c->chipset.is_965) {
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_AND(p, brw_null_reg(), get_element_ud(c->reg.R0, 2), 
              brw_imm_ud(1<<20));
      neg_rhw = brw_IF(p, BRW_EXECUTE_1); 
      {
         brw_clip_test(c);
      }
      brw_ENDIF(p, neg_rhw);
   }
   /* Can't push into do_clip_tri because with polygon (or quad)
    * flatshading, need to apply the flatshade here because we don't
    * respect the PV when converting to trifan for emit:
    */
   if (c->key.do_flat_shading) 
      brw_clip_tri_flat_shade(c); 
      
   if ((c->key.clip_mode == BRW_CLIPMODE_NORMAL) ||
       (c->key.clip_mode == BRW_CLIPMODE_KERNEL_CLIP))
      do_clip_tri(c);
   else 
      maybe_do_clip_tri(c);

   brw_clip_tri_emit_polygon(c);

   /* Send an empty message to kill the thread:
    */
   brw_clip_kill_thread(c);
}
Example #17
static void do_twoside_color( struct brw_sf_compile *c )
{
    struct brw_compile *p = &c->func;
    GLuint backface_conditional = c->key.frontface_ccw ? BRW_CONDITIONAL_G : BRW_CONDITIONAL_L;

    /* Already done in clip program:
     */
    if (c->key.primitive == SF_UNFILLED_TRIS)
        return;

    /* XXX: What happens if BFC isn't present?  This could only happen
     * for user-supplied vertex programs, as t_vp_build.c always does
     * the right thing.
     */
    if (!(have_attr(c, VERT_RESULT_COL0) && have_attr(c, VERT_RESULT_BFC0)) &&
            !(have_attr(c, VERT_RESULT_COL1) && have_attr(c, VERT_RESULT_BFC1)))
        return;

    /* Need to use BRW_EXECUTE_4 and also do a 4-wide compare in order
     * to get all channels active inside the IF.  In the clipping code
     * we run with NoMask, so that isn't an issue and we can use
     * BRW_EXECUTE_1 for all comparisons.
     */
    brw_push_insn_state(p);
    brw_CMP(p, vec4(brw_null_reg()), backface_conditional, c->det, brw_imm_f(0));
    brw_IF(p, BRW_EXECUTE_4);
    {
        switch (c->nr_verts) {
        case 3:
            copy_bfc(c, c->vert[2]);
            /* fall through */
        case 2:
            copy_bfc(c, c->vert[1]);
            /* fall through */
        case 1:
            copy_bfc(c, c->vert[0]);
        }
    }
    brw_ENDIF(p);
    brw_pop_insn_state(p);
}
Example #18
static void emit_points(struct brw_clip_compile *c,
			bool do_offset )
{
   struct brw_compile *p = &c->func;

   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);

   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

   brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw if edgeflag != 0 
       */
      brw_CMP(p, 
	      vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, 
	      deref_1f(v0, brw_vert_result_to_offset(&c->vue_map,
                                                     VARYING_SLOT_EDGE)),
	      brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      {
	 if (do_offset)
	    apply_one_offset(c, v0);

	 brw_clip_emit_vue(c, v0, 1, 0,
                           (_3DPRIM_POINTLIST << URB_WRITE_PRIM_TYPE_SHIFT)
                           | URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
      }
      brw_ENDIF(p);

      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
   }
   brw_WHILE(p);
}
Example #19
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
    struct brw_codegen *p = &c->func;

    if (p->devinfo->gen == 5) {
        brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
        brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
        brw_IF(p, BRW_EXECUTE_1);
        {
            brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
            brw_ff_sync(p,
			c->reg.R0,
			0,
			c->reg.R0,
			1, /* allocate */
			1, /* response length */
			0 /* eot */);
        }
        brw_ENDIF(p);
        brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
    }
}
Example #20
static void emit_points(struct brw_clip_compile *c,
			GLboolean do_offset )
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *loop;
   struct brw_instruction *draw_point;

   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);

   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

   loop = brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw if edgeflag != 0 
       */
      brw_CMP(p, 
	      vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, 
	      deref_1f(v0, c->offset[VERT_RESULT_EDGE]),
	      brw_imm_f(0));
      draw_point = brw_IF(p, BRW_EXECUTE_1);
      {
	 if (do_offset)
	    apply_one_offset(c, v0);

	 brw_clip_emit_vue(c, v0, 1, 0, (_3DPRIM_POINTLIST << 2) | R02_PRIM_START | R02_PRIM_END);
      }
      brw_ENDIF(p, draw_point);

      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
   }
   brw_WHILE(p, loop);
}
Example #21
void brw_clip_tri_init_vertices( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */
   struct brw_instruction *is_rev;

   /* Initial list of indices for incoming vertices:
    */
   brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK)); 
   brw_CMP(p, 
	   vec1(brw_null_reg()), 
	   BRW_CONDITIONAL_EQ, 
	   tmp0,
	   brw_imm_ud(_3DPRIM_TRISTRIP_REVERSE));

   /* XXX: Is there an easier way to do this?  Need to reverse every
    * second tristrip element:  Can ignore sometimes?
    */
   is_rev = brw_IF(p, BRW_EXECUTE_1);
   {   
      brw_MOV(p, get_element(c->reg.inlist, 0),  brw_address(c->reg.vertex[1]) );
      brw_MOV(p, get_element(c->reg.inlist, 1),  brw_address(c->reg.vertex[0]) );
      if (c->need_direction)
	 brw_MOV(p, c->reg.dir, brw_imm_f(-1));
   }
   is_rev = brw_ELSE(p, is_rev);
   {
      brw_MOV(p, get_element(c->reg.inlist, 0),  brw_address(c->reg.vertex[0]) );
      brw_MOV(p, get_element(c->reg.inlist, 1),  brw_address(c->reg.vertex[1]) );
      if (c->need_direction)
	 brw_MOV(p, c->reg.dir, brw_imm_f(1));
   }
   brw_ENDIF(p, is_rev);

   brw_MOV(p, get_element(c->reg.inlist, 2),  brw_address(c->reg.vertex[2]) );
   brw_MOV(p, brw_vec8_grf(c->reg.outlist.nr, 0), brw_imm_f(0));
   brw_MOV(p, c->reg.nr_verts, brw_imm_ud(3));
}
Example #22
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
    struct brw_compile *p = &c->func;
    struct brw_context *brw = p->brw;

    if (brw->gen == 5) {
        brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
        brw_last_inst->header.destreg__conditionalmod = BRW_CONDITIONAL_Z;
        brw_IF(p, BRW_EXECUTE_1);
        {
            brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
            brw_ff_sync(p,
			c->reg.R0,
			0,
			c->reg.R0,
			1, /* allocate */
			1, /* response length */
			0 /* eot */);
        }
        brw_ENDIF(p);
        brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
    }
}
Example #23
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
    struct intel_context *intel = &c->func.brw->intel;

    if (intel->needs_ff_sync) {
        struct brw_compile *p = &c->func;

        brw_set_conditionalmod(p, BRW_CONDITIONAL_Z);
        brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
        brw_IF(p, BRW_EXECUTE_1);
        {
            brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
            brw_ff_sync(p,
			c->reg.R0,
			0,
			c->reg.R0,
			1, /* allocate */
			1, /* response length */
			0 /* eot */);
        }
        brw_ENDIF(p);
        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    }
}
Example #24
/**
 * Loads the clip distance for a vertex into `dst`, and ends with
 * a comparison of it to zero with the condition `cond`.
 *
 * - If using a fixed plane, the distance is dot(hpos, plane).
 * - If using a user clip plane, the distance is directly available in the vertex.
 */
static inline void
load_clip_distance(struct brw_clip_compile *c, struct brw_indirect vtx,
                struct brw_reg dst, GLuint hpos_offset, int cond)
{
   struct brw_codegen *p = &c->func;

   dst = vec4(dst);
   brw_AND(p, vec1(brw_null_reg()), c->reg.vertex_src_mask, brw_imm_ud(1));
   brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
   brw_IF(p, BRW_EXECUTE_1);
   {
      struct brw_indirect temp_ptr = brw_indirect(7, 0);
      brw_ADD(p, get_addr_reg(temp_ptr), get_addr_reg(vtx), c->reg.clipdistance_offset);
      brw_MOV(p, vec1(dst), deref_1f(temp_ptr, 0));
   }
   brw_ELSE(p);
   {
      brw_MOV(p, dst, deref_4f(vtx, hpos_offset));
      brw_DP4(p, dst, dst, c->reg.plane_equation);
   }
   brw_ENDIF(p);

   brw_CMP(p, brw_null_reg(), cond, vec1(dst), brw_imm_f(0.0f));
}
Example #25
/**
 * Generate the geometry shader program used on Gen6 to perform stream output
 * (transform feedback).
 */
void
gen6_sol_program(struct brw_gs_compile *c, struct brw_gs_prog_key *key,
	         unsigned num_verts, bool check_edge_flags)
{
   struct brw_compile *p = &c->func;
   c->prog_data.svbi_postincrement_value = num_verts;

   brw_gs_alloc_regs(c, num_verts, true);
   brw_gs_initialize_header(c);

   if (key->num_transform_feedback_bindings > 0) {
      unsigned vertex, binding;
      struct brw_reg destination_indices_uw =
         vec8(retype(c->reg.destination_indices, BRW_REGISTER_TYPE_UW));

      /* Note: since we use the binding table to keep track of buffer offsets
       * and stride, the GS doesn't need to keep track of a separate pointer
       * into each buffer; it uses a single pointer which increments by 1 for
       * each vertex.  So we use SVBI0 for this pointer, regardless of whether
       * transform feedback is in interleaved or separate attribs mode.
       *
       * Make sure that the buffers have enough room for all the vertices.
       */
      brw_ADD(p, get_element_ud(c->reg.temp, 0),
	         get_element_ud(c->reg.SVBI, 0), brw_imm_ud(num_verts));
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_LE,
	         get_element_ud(c->reg.temp, 0),
	         get_element_ud(c->reg.SVBI, 4));
      brw_IF(p, BRW_EXECUTE_1);

      /* Compute the destination indices to write to.  Usually we use SVBI[0]
       * + (0, 1, 2).  However, for odd-numbered triangles in tristrips, the
       * vertices come down the pipeline in reversed winding order, so we need
       * to flip the order when writing to the transform feedback buffer.  To
       * ensure that flatshading accuracy is preserved, we need to write them
       * in order SVBI[0] + (0, 2, 1) if we're using the first provoking
       * vertex convention, and in order SVBI[0] + (1, 0, 2) if we're using
       * the last provoking vertex convention.
       *
       * Note: since brw_imm_v can only be used in instructions in
       * packed-word execution mode, and SVBI is a double-word, we need to
       * first move the appropriate immediate constant ((0, 1, 2), (0, 2, 1),
       * or (1, 0, 2)) to the destination_indices register, and then add SVBI
       * using a separate instruction.  Also, since the immediate constant is
       * expressed as packed words, and we need to load double-words into
       * destination_indices, we need to intersperse zeros to fill the upper
       * halves of each double-word.
       */
      brw_MOV(p, destination_indices_uw,
              brw_imm_v(0x00020100)); /* (0, 1, 2) */
      if (num_verts == 3) {
         /* Get primitive type into temp register. */
         brw_AND(p, get_element_ud(c->reg.temp, 0),
                 get_element_ud(c->reg.R0, 2), brw_imm_ud(0x1f));

         /* Test if primitive type is TRISTRIP_REVERSE.  We need to do this as
          * an 8-wide comparison so that the conditional MOV that follows
          * moves all 8 words correctly.
          */
         brw_CMP(p, vec8(brw_null_reg()), BRW_CONDITIONAL_EQ,
                 get_element_ud(c->reg.temp, 0),
                 brw_imm_ud(_3DPRIM_TRISTRIP_REVERSE));

         /* If so, then overwrite destination_indices_uw with the appropriate
          * reordering.
          */
         brw_MOV(p, destination_indices_uw,
                 brw_imm_v(key->pv_first ? 0x00010200    /* (0, 2, 1) */
                                         : 0x00020001)); /* (1, 0, 2) */
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }
      brw_ADD(p, c->reg.destination_indices,
              c->reg.destination_indices, get_element_ud(c->reg.SVBI, 0));

      /* For each vertex, generate code to output each varying using the
       * appropriate binding table entry.
       */
      for (vertex = 0; vertex < num_verts; ++vertex) {
         /* Set up the correct destination index for this vertex */
         brw_MOV(p, get_element_ud(c->reg.header, 5),
                 get_element_ud(c->reg.destination_indices, vertex));

         for (binding = 0; binding < key->num_transform_feedback_bindings;
              ++binding) {
            unsigned char varying =
               key->transform_feedback_bindings[binding];
            unsigned char slot = c->vue_map.varying_to_slot[varying];
            /* From the Sandybridge PRM, Volume 2, Part 1, Section 4.5.1:
             *
             *   "Prior to End of Thread with a URB_WRITE, the kernel must
             *   ensure that all writes are complete by sending the final
             *   write as a committed write."
             */
            bool final_write =
               binding == key->num_transform_feedback_bindings - 1 &&
               vertex == num_verts - 1;
            struct brw_reg vertex_slot = c->reg.vertex[vertex];
            vertex_slot.nr += slot / 2;
            vertex_slot.subnr = (slot % 2) * 16;
            /* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
            vertex_slot.dw1.bits.swizzle = varying == VARYING_SLOT_PSIZ
               ? BRW_SWIZZLE_WWWW : key->transform_feedback_swizzles[binding];
            brw_set_access_mode(p, BRW_ALIGN_16);
            brw_MOV(p, stride(c->reg.header, 4, 4, 1),
                    retype(vertex_slot, BRW_REGISTER_TYPE_UD));
            brw_set_access_mode(p, BRW_ALIGN_1);
            brw_svb_write(p,
                          final_write ? c->reg.temp : brw_null_reg(), /* dest */
                          1, /* msg_reg_nr */
                          c->reg.header, /* src0 */
                          SURF_INDEX_SOL_BINDING(binding), /* binding_table_index */
                          final_write); /* send_commit_msg */
         }
      }
      brw_ENDIF(p);

      /* Now, reinitialize the header register from R0 to restore the parts of
       * the register that we overwrote while streaming out transform feedback
       * data.
       */
      brw_gs_initialize_header(c);

      /* Finally, wait for the write commit to occur so that we can proceed to
       * other things safely.
       *
       * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
       *
       *   The write commit does not modify the destination register, but
       *   merely clears the dependency associated with the destination
       *   register. Thus, a simple “mov” instruction using the register as a
       *   source is sufficient to wait for the write commit to occur.
       */
      brw_MOV(p, c->reg.temp, c->reg.temp);
   }

   brw_gs_ff_sync(c, 1);

   /* If RASTERIZER_DISCARD is enabled, we have nothing further to do, so
    * release the URB that was just allocated, and terminate the thread.
    */
   if (key->rasterizer_discard) {
      brw_gs_terminate(c);
      return;
   }

   brw_gs_overwrite_header_dw2_from_r0(c);
   switch (num_verts) {
   case 1:
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
      brw_gs_emit_vue(c, c->reg.vertex[0], true);
      break;
   case 2:
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[0], false);
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_END - URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[1], true);
      break;
   case 3:
      if (check_edge_flags) {
         /* Only emit vertices 0 and 1 if this is the first triangle of the
          * polygon.  Otherwise they are redundant.
          */
         brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
         brw_AND(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 get_element_ud(c->reg.R0, 2),
                 brw_imm_ud(BRW_GS_EDGE_INDICATOR_0));
         brw_IF(p, BRW_EXECUTE_1);
      }
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[0], false);
      brw_gs_offset_header_dw2(c, -URB_WRITE_PRIM_START);
      brw_gs_emit_vue(c, c->reg.vertex[1], false);
      if (check_edge_flags) {
         brw_ENDIF(p);
         /* Only emit vertex 2 in PRIM_END mode if this is the last triangle
          * of the polygon.  Otherwise leave the primitive incomplete because
          * there are more polygon vertices coming.
          */
         brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
         brw_AND(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 get_element_ud(c->reg.R0, 2),
                 brw_imm_ud(BRW_GS_EDGE_INDICATOR_1));
         brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
      }
      brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_END);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      brw_gs_emit_vue(c, c->reg.vertex[2], true);
      break;
   }
}
Example #26
static void brw_clip_test( struct brw_clip_compile *c )
{
    struct brw_reg t = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
    struct brw_reg t1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
    struct brw_reg t2 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
    struct brw_reg t3 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);

    struct brw_reg v0 = get_tmp(c);
    struct brw_reg v1 = get_tmp(c);
    struct brw_reg v2 = get_tmp(c);

    struct brw_indirect vt0 = brw_indirect(0, 0);
    struct brw_indirect vt1 = brw_indirect(1, 0);
    struct brw_indirect vt2 = brw_indirect(2, 0);

    struct brw_compile *p = &c->func;
    struct brw_instruction *is_outside;
    struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */

    brw_MOV(p, get_addr_reg(vt0), brw_address(c->reg.vertex[0]));
    brw_MOV(p, get_addr_reg(vt1), brw_address(c->reg.vertex[1]));
    brw_MOV(p, get_addr_reg(vt2), brw_address(c->reg.vertex[2]));
    brw_MOV(p, v0, deref_4f(vt0, c->offset_hpos));
    brw_MOV(p, v1, deref_4f(vt1, c->offset_hpos));
    brw_MOV(p, v2, deref_4f(vt2, c->offset_hpos));
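    /* Clear the six frustum-plane bits of planemask; they are recomputed
     * below from the per-plane outcode tests:
     */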
    brw_AND(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(~0x3f));

    /* test nearz, xmin, ymin planes */
    /* clip.xyz < -clip.w */
    brw_CMP(p, t1, BRW_CONDITIONAL_L, v0, negate(get_element(v0, 3))); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, t2, BRW_CONDITIONAL_L, v1, negate(get_element(v1, 3))); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, t3, BRW_CONDITIONAL_L, v2, negate(get_element(v2, 3))); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    /* All vertices are outside of a plane, rejected */
    brw_AND(p, t, t1, t2);
    brw_AND(p, t, t, t3);
    brw_OR(p, tmp0, get_element(t, 0), get_element(t, 1));
    brw_OR(p, tmp0, tmp0, get_element(t, 2));
    brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
    brw_AND(p, brw_null_reg(), tmp0, brw_imm_ud(0x1));
    is_outside = brw_IF(p, BRW_EXECUTE_1);
    {
        brw_clip_kill_thread(c);
    }
    brw_ENDIF(p, is_outside);
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    /* some vertices are inside a plane, some are outside, need to clip */
    brw_XOR(p, t, t1, t2);
    brw_XOR(p, t1, t2, t3);
    brw_OR(p, t, t, t1);
    brw_AND(p, t, t, brw_imm_ud(0x1));
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 0), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<5)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 1), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<3)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 2), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<1)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    /* test farz, xmax, ymax planes */
    /* clip.xyz > clip.w */
    brw_CMP(p, t1, BRW_CONDITIONAL_G, v0, get_element(v0, 3)); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, t2, BRW_CONDITIONAL_G, v1, get_element(v1, 3)); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, t3, BRW_CONDITIONAL_G, v2, get_element(v2, 3)); 
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    /* All vertices are outside of a plane, rejected */
    brw_AND(p, t, t1, t2);
    brw_AND(p, t, t, t3);
    brw_OR(p, tmp0, get_element(t, 0), get_element(t, 1));
    brw_OR(p, tmp0, tmp0, get_element(t, 2));
    brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
    brw_AND(p, brw_null_reg(), tmp0, brw_imm_ud(0x1));
    is_outside = brw_IF(p, BRW_EXECUTE_1);
    {
        brw_clip_kill_thread(c);
    }
    brw_ENDIF(p, is_outside);
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    /* some vertices are inside a plane, some are outside, need to clip */
    brw_XOR(p, t, t1, t2);
    brw_XOR(p, t1, t2, t3);
    brw_OR(p, t, t, t1);
    brw_AND(p, t, t, brw_imm_ud(0x1));
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 0), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<4)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 1), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<2)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
            get_element(t, 2), brw_imm_ud(0));
    brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<0)));
    brw_set_predicate_control(p, BRW_PREDICATE_NONE);

    release_tmps(c);
}
Example #27
/* Use mesa's clipping algorithms, translated to GEN4 assembly.
 */
void brw_clip_tri( struct brw_clip_compile *c )
{
   struct brw_compile *p = &c->func;
   struct brw_indirect vtx = brw_indirect(0, 0);
   struct brw_indirect vtxPrev = brw_indirect(1, 0);
   struct brw_indirect vtxOut = brw_indirect(2, 0);
   struct brw_indirect plane_ptr = brw_indirect(3, 0);
   struct brw_indirect inlist_ptr = brw_indirect(4, 0);
   struct brw_indirect outlist_ptr = brw_indirect(5, 0);
   struct brw_indirect freelist_ptr = brw_indirect(6, 0);
   struct brw_instruction *plane_loop;
   struct brw_instruction *plane_active;
   struct brw_instruction *vertex_loop;
   struct brw_instruction *next_test;
   struct brw_instruction *prev_test;
   
   brw_MOV(p, get_addr_reg(vtxPrev),     brw_address(c->reg.vertex[2]) );
   brw_MOV(p, get_addr_reg(plane_ptr),   brw_clip_plane0_address(c));
   brw_MOV(p, get_addr_reg(inlist_ptr),  brw_address(c->reg.inlist));
   brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));

   brw_MOV(p, get_addr_reg(freelist_ptr), brw_address(c->reg.vertex[3]) );

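   /* For each enabled clip plane, clip the polygon in inlist against the
    * plane and write the surviving and newly created vertices to outlist:
    */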
   plane_loop = brw_DO(p, BRW_EXECUTE_1);
   {
      /* if (planemask & 1)
       */
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_AND(p, vec1(brw_null_reg()), c->reg.planemask, brw_imm_ud(1));
      
      plane_active = brw_IF(p, BRW_EXECUTE_1);
      {
	 /* vtxOut = freelist_ptr++ 
	  */
	 brw_MOV(p, get_addr_reg(vtxOut),       get_addr_reg(freelist_ptr) );
	 brw_ADD(p, get_addr_reg(freelist_ptr), get_addr_reg(freelist_ptr), brw_imm_uw(c->nr_regs * REG_SIZE));

	 if (c->key.nr_userclip)
	    brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
	 else
	    brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));
	    
	 brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
	 brw_MOV(p, c->reg.nr_verts, brw_imm_ud(0));

	 vertex_loop = brw_DO(p, BRW_EXECUTE_1);
	 {
	    /* vtx = *input_ptr;
	     */
	    brw_MOV(p, get_addr_reg(vtx), deref_1uw(inlist_ptr, 0));

	    /* IS_NEGATIVE(prev) */
	    brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
	    brw_DP4(p, vec4(c->reg.dpPrev), deref_4f(vtxPrev, c->offset_hpos), c->reg.plane_equation);
	    prev_test = brw_IF(p, BRW_EXECUTE_1);
	    {
	       /* IS_POSITIVE(next)
		*/
	       brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
	       brw_DP4(p, vec4(c->reg.dp), deref_4f(vtx, c->offset_hpos), c->reg.plane_equation);
	       next_test = brw_IF(p, BRW_EXECUTE_1);
	       {

		  /* Coming back in.
		   */
		  brw_ADD(p, c->reg.t, c->reg.dpPrev, negate(c->reg.dp));
		  brw_math_invert(p, c->reg.t, c->reg.t);
		  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dpPrev);

		  /* If (vtxOut == 0) vtxOut = vtxPrev
		   */
		  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ, get_addr_reg(vtxOut), brw_imm_uw(0) );
		  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtxPrev) );
		  brw_set_predicate_control(p, BRW_PREDICATE_NONE);

		  brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx, c->reg.t, GL_FALSE);

		  /* *outlist_ptr++ = vtxOut;
		   * nr_verts++; 
		   * vtxOut = 0;
		   */
		  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
		  brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
		  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
		  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
	       }
	       brw_ENDIF(p, next_test);
	       
	    }
	    prev_test = brw_ELSE(p, prev_test);
	    {
	       /* *outlist_ptr++ = vtxPrev;
		* nr_verts++;
		*/
	       brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxPrev));
	       brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
	       brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));

	       /* IS_NEGATIVE(next)
		*/
	       brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
	       brw_DP4(p, vec4(c->reg.dp), deref_4f(vtx, c->offset_hpos), c->reg.plane_equation);
	       next_test = brw_IF(p, BRW_EXECUTE_1);
	       {
		  /* Going out of bounds.  Avoid division by zero as we
		   * know dp != dpPrev from DIFFERENT_SIGNS, above.
		   */
		  brw_ADD(p, c->reg.t, c->reg.dp, negate(c->reg.dpPrev));
		  brw_math_invert(p, c->reg.t, c->reg.t);
		  brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp);

		  /* If (vtxOut == 0) vtxOut = vtx
		   */
		  brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ, get_addr_reg(vtxOut), brw_imm_uw(0) );
		  brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtx) );
		  brw_set_predicate_control(p, BRW_PREDICATE_NONE);

		  brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev, c->reg.t, GL_TRUE);		  

		  /* *outlist_ptr++ = vtxOut;
		   * nr_verts++; 
		   * vtxOut = 0;
		   */
		  brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
		  brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
		  brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
		  brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
	       } 	       
	       brw_ENDIF(p, next_test);
	    }
	    brw_ENDIF(p, prev_test);
	    
	    /* vtxPrev = vtx;
	     * inlist_ptr++;
	     */
	    brw_MOV(p, get_addr_reg(vtxPrev), get_addr_reg(vtx));
	    brw_ADD(p, get_addr_reg(inlist_ptr), get_addr_reg(inlist_ptr), brw_imm_uw(sizeof(short)));

	    /* while (--loopcount != 0)
	     */
	    brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
	    brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
	 } 
	 brw_WHILE(p, vertex_loop);

	 /* vtxPrev = *(outlist_ptr-1)  OR: outlist[nr_verts-1]
	  * inlist = outlist
	  * inlist_ptr = &inlist[0]
	  * outlist_ptr = &outlist[0]
	  */
	 brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_w(-2));
	 brw_MOV(p, get_addr_reg(vtxPrev), deref_1uw(outlist_ptr, 0));
	 brw_MOV(p, brw_vec8_grf(c->reg.inlist.nr, 0), brw_vec8_grf(c->reg.outlist.nr, 0));
	 brw_MOV(p, get_addr_reg(inlist_ptr), brw_address(c->reg.inlist));
	 brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
      }
      brw_ENDIF(p, plane_active);
      
      /* plane_ptr++;
       */
      brw_ADD(p, get_addr_reg(plane_ptr), get_addr_reg(plane_ptr), brw_clip_plane_stride(c));

      /* nr_verts >= 3 
       */
      brw_CMP(p,
	      vec1(brw_null_reg()),
	      BRW_CONDITIONAL_GE,
	      c->reg.nr_verts,
	      brw_imm_ud(3));
   
      /* && (planemask>>=1) != 0
       */
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
      brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
   }
   brw_WHILE(p, plane_loop);
}
Example #28
/**
 * Generate assembly for a Vec4 IR instruction.
 *
 * \param instruction The Vec4 IR instruction to generate code for.
 * \param dst         The destination register.
 * \param src         An array of up to three source registers.
 */
void
vec4_generator::generate_vec4_instruction(vec4_instruction *instruction,
        struct brw_reg dst,
        struct brw_reg *src)
{
    vec4_instruction *inst = (vec4_instruction *) instruction;

    if (dst.width == BRW_WIDTH_4) {
        /* This happens in attribute fixups for "dual instanced" geometry
         * shaders, since they use attributes that are vec4's.  Since the exec
         * width is only 4, it's essential that the caller set
         * force_writemask_all in order to make sure the instruction is executed
         * regardless of which channels are enabled.
         */
        assert(inst->force_writemask_all);

        /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
         * the following register region restrictions (from Graphics BSpec:
         * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
         * > Register Region Restrictions)
         *
         *     1. ExecSize must be greater than or equal to Width.
         *
         *     2. If ExecSize = Width and HorzStride != 0, VertStride must be set
         *        to Width * HorzStride."
         */
        for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
                src[i] = stride(src[i], 4, 4, 1);
        }
    }

    switch (inst->opcode) {
    case BRW_OPCODE_MOV:
        brw_MOV(p, dst, src[0]);
        break;
    case BRW_OPCODE_ADD:
        brw_ADD(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_MUL:
        brw_MUL(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_MACH:
        brw_set_acc_write_control(p, 1);
        brw_MACH(p, dst, src[0], src[1]);
        brw_set_acc_write_control(p, 0);
        break;

    case BRW_OPCODE_MAD:
        assert(brw->gen >= 6);
        brw_MAD(p, dst, src[0], src[1], src[2]);
        break;

    case BRW_OPCODE_FRC:
        brw_FRC(p, dst, src[0]);
        break;
    case BRW_OPCODE_RNDD:
        brw_RNDD(p, dst, src[0]);
        break;
    case BRW_OPCODE_RNDE:
        brw_RNDE(p, dst, src[0]);
        break;
    case BRW_OPCODE_RNDZ:
        brw_RNDZ(p, dst, src[0]);
        break;

    case BRW_OPCODE_AND:
        brw_AND(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_OR:
        brw_OR(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_XOR:
        brw_XOR(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_NOT:
        brw_NOT(p, dst, src[0]);
        break;
    case BRW_OPCODE_ASR:
        brw_ASR(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_SHR:
        brw_SHR(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_SHL:
        brw_SHL(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_CMP:
        brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
        break;
    case BRW_OPCODE_SEL:
        brw_SEL(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_DPH:
        brw_DPH(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_DP4:
        brw_DP4(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_DP3:
        brw_DP3(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_DP2:
        brw_DP2(p, dst, src[0], src[1]);
        break;

    case BRW_OPCODE_F32TO16:
        assert(brw->gen >= 7);
        brw_F32TO16(p, dst, src[0]);
        break;

    case BRW_OPCODE_F16TO32:
        assert(brw->gen >= 7);
        brw_F16TO32(p, dst, src[0]);
        break;

    case BRW_OPCODE_LRP:
        assert(brw->gen >= 6);
        brw_LRP(p, dst, src[0], src[1], src[2]);
        break;

    case BRW_OPCODE_BFREV:
        assert(brw->gen >= 7);
        /* BFREV only supports UD type for src and dst. */
        brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                  retype(src[0], BRW_REGISTER_TYPE_UD));
        break;
    case BRW_OPCODE_FBH:
        assert(brw->gen >= 7);
        /* FBH only supports UD type for dst. */
        brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
        break;
    case BRW_OPCODE_FBL:
        assert(brw->gen >= 7);
        /* FBL only supports UD type for dst. */
        brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
        break;
    case BRW_OPCODE_CBIT:
        assert(brw->gen >= 7);
        /* CBIT only supports UD type for dst. */
        brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
        break;
    case BRW_OPCODE_ADDC:
        assert(brw->gen >= 7);
        brw_set_acc_write_control(p, 1);
        brw_ADDC(p, dst, src[0], src[1]);
        brw_set_acc_write_control(p, 0);
        break;
    case BRW_OPCODE_SUBB:
        assert(brw->gen >= 7);
        brw_set_acc_write_control(p, 1);
        brw_SUBB(p, dst, src[0], src[1]);
        brw_set_acc_write_control(p, 0);
        break;

    case BRW_OPCODE_BFE:
        assert(brw->gen >= 7);
        brw_BFE(p, dst, src[0], src[1], src[2]);
        break;

    case BRW_OPCODE_BFI1:
        assert(brw->gen >= 7);
        brw_BFI1(p, dst, src[0], src[1]);
        break;
    case BRW_OPCODE_BFI2:
        assert(brw->gen >= 7);
        brw_BFI2(p, dst, src[0], src[1], src[2]);
        break;

    case BRW_OPCODE_IF:
        if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(brw->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
        } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
        }
        break;

    case BRW_OPCODE_ELSE:
        brw_ELSE(p);
        break;
    case BRW_OPCODE_ENDIF:
        brw_ENDIF(p);
        break;

    case BRW_OPCODE_DO:
        brw_DO(p, BRW_EXECUTE_8);
        break;

    case BRW_OPCODE_BREAK:
        brw_BREAK(p);
        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
        break;
    case BRW_OPCODE_CONTINUE:
        /* FINISHME: We need to write the loop instruction support still. */
        if (brw->gen >= 6)
            gen6_CONT(p);
        else
            brw_CONT(p);
        brw_set_predicate_control(p, BRW_PREDICATE_NONE);
        break;

    case BRW_OPCODE_WHILE:
        brw_WHILE(p);
        break;

    case SHADER_OPCODE_RCP:
    case SHADER_OPCODE_RSQ:
    case SHADER_OPCODE_SQRT:
    case SHADER_OPCODE_EXP2:
    case SHADER_OPCODE_LOG2:
    case SHADER_OPCODE_SIN:
    case SHADER_OPCODE_COS:
        if (brw->gen == 6) {
            generate_math1_gen6(inst, dst, src[0]);
        } else {
            /* Also works for Gen7. */
            generate_math1_gen4(inst, dst, src[0]);
        }
        break;

    case SHADER_OPCODE_POW:
    case SHADER_OPCODE_INT_QUOTIENT:
    case SHADER_OPCODE_INT_REMAINDER:
        if (brw->gen >= 7) {
            generate_math2_gen7(inst, dst, src[0], src[1]);
        } else if (brw->gen == 6) {
            generate_math2_gen6(inst, dst, src[0], src[1]);
        } else {
            generate_math2_gen4(inst, dst, src[0], src[1]);
        }
        break;

    case SHADER_OPCODE_TEX:
    case SHADER_OPCODE_TXD:
    case SHADER_OPCODE_TXF:
    case SHADER_OPCODE_TXF_CMS:
    case SHADER_OPCODE_TXF_MCS:
    case SHADER_OPCODE_TXL:
    case SHADER_OPCODE_TXS:
    case SHADER_OPCODE_TG4:
    case SHADER_OPCODE_TG4_OFFSET:
        generate_tex(inst, dst, src[0]);
        break;

    case VS_OPCODE_URB_WRITE:
        generate_vs_urb_write(inst);
        break;

    case SHADER_OPCODE_GEN4_SCRATCH_READ:
        generate_scratch_read(inst, dst, src[0]);
        break;

    case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
        generate_scratch_write(inst, dst, src[0], src[1]);
        break;

    case VS_OPCODE_PULL_CONSTANT_LOAD:
        generate_pull_constant_load(inst, dst, src[0], src[1]);
        break;

    case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
        generate_pull_constant_load_gen7(inst, dst, src[0], src[1]);
        break;

    case GS_OPCODE_URB_WRITE:
        generate_gs_urb_write(inst);
        break;

    case GS_OPCODE_THREAD_END:
        generate_gs_thread_end(inst);
        break;

    case GS_OPCODE_SET_WRITE_OFFSET:
        generate_gs_set_write_offset(dst, src[0], src[1]);
        break;

    case GS_OPCODE_SET_VERTEX_COUNT:
        generate_gs_set_vertex_count(dst, src[0]);
        break;

    case GS_OPCODE_SET_DWORD_2_IMMED:
        generate_gs_set_dword_2_immed(dst, src[0]);
        break;

    case GS_OPCODE_PREPARE_CHANNEL_MASKS:
        generate_gs_prepare_channel_masks(dst);
        break;

    case GS_OPCODE_SET_CHANNEL_MASKS:
        generate_gs_set_channel_masks(dst, src[0]);
        break;

    case GS_OPCODE_GET_INSTANCE_ID:
        generate_gs_get_instance_id(dst);
        break;

    case SHADER_OPCODE_SHADER_TIME_ADD:
        brw_shader_time_add(p, src[0],
                            prog_data->base.binding_table.shader_time_start);
        brw_mark_surface_used(&prog_data->base,
                              prog_data->base.binding_table.shader_time_start);
        break;

    case SHADER_OPCODE_UNTYPED_ATOMIC:
        generate_untyped_atomic(inst, dst, src[0], src[1]);
        break;

    case SHADER_OPCODE_UNTYPED_SURFACE_READ:
        generate_untyped_surface_read(inst, dst, src[0]);
        break;

    case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
        generate_unpack_flags(inst, dst);
        break;

    default:
        if (inst->opcode < (int) ARRAY_SIZE(opcode_descs)) {
            _mesa_problem(&brw->ctx, "Unsupported opcode in `%s' in vec4\n",
                          opcode_descs[inst->opcode].name);
        } else {
            _mesa_problem(&brw->ctx, "Unsupported opcode %d in vec4", inst->opcode);
        }
        abort();
    }
}
/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * Beware that dest_ptr can be equal to v0_ptr!
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
			     struct brw_indirect dest_ptr,
			     struct brw_indirect v0_ptr, /* from */
			     struct brw_indirect v1_ptr, /* to */
			     struct brw_reg t0,
			     bool force_edgeflag)
{
   struct brw_codegen *p = &c->func;
   struct brw_reg t_nopersp, v0_ndc_copy;
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After CLIP stage, only first 256 bits of the VUE are read
    * back on Ironlake, so needn't change it
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);


   /* First handle the 3D and NDC interpolation, in case we
    * need noperspective interpolation. Doing it early has no
    * performance impact in any case.
    */

   /* Take a copy of the v0 NDC coordinates, in case dest == v0. */
   if (c->has_noperspective_shading) {
      GLuint offset = brw_varying_to_offset(&c->vue_map,
                                                 BRW_VARYING_SLOT_NDC);
      v0_ndc_copy = get_tmp(c);
      brw_MOV(p, v0_ndc_copy, deref_4f(v0_ptr, offset));
   }

   /* Compute the new 3D position
    *
    * dest_hpos = v0_hpos * (1 - t0) + v1_hpos * t0
    */
   {
      GLuint delta = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
      struct brw_reg tmp = get_tmp(c);
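      /* The MUL to the null register still updates the implicit accumulator
       * with v1 * t0; MAC then adds -v0 * t0, so tmp ends up holding
       * t0 * (v1 - v0) and the ADD below produces v0 + t0 * (v1 - v0).
       */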
      brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
      brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
      brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      release_tmp(c, tmp);
   }

   /* Recreate the projected (NDC) coordinate in the new vertex header */
   brw_clip_project_vertex(c, dest_ptr);

   /* If we have noperspective attributes,
    * we need to compute the screen-space t
    */
   if (c->has_noperspective_shading) {
      GLuint delta = brw_varying_to_offset(&c->vue_map,
                                                BRW_VARYING_SLOT_NDC);
      struct brw_reg tmp = get_tmp(c);
      t_nopersp = get_tmp(c);

      /* t_nopersp = vec4(v1.xy, dest.xy) */
      brw_MOV(p, t_nopersp, deref_4f(v1_ptr, delta));
      brw_MOV(p, tmp, deref_4f(dest_ptr, delta));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
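      /* The ZW writemask plus the xyxy swizzle below drop dest.xy into
       * t_nopersp.zw, completing vec4(v1.xy, dest.xy).
       */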
      brw_MOV(p,
              brw_writemask(t_nopersp, WRITEMASK_ZW),
              brw_swizzle(tmp, 0, 1, 0, 1));

      /* t_nopersp = vec4(v1.xy, dest.xy) - v0.xyxy */
      brw_ADD(p, t_nopersp, t_nopersp,
              negate(brw_swizzle(v0_ndc_copy, 0, 1, 0, 1)));

      /* Add the absolute values of the X and Y deltas so that if
       * the points aren't in the same place on the screen we get
       * nonzero values to divide.
       *
       * After that, we have vert1 - vert0 in t_nopersp.x and
       * vertnew - vert0 in t_nopersp.y
       *
       * t_nopersp = vec2(|v1.x  -v0.x| + |v1.y  -v0.y|,
       *                  |dest.x-v0.x| + |dest.y-v0.y|)
       */
      brw_ADD(p,
              brw_writemask(t_nopersp, WRITEMASK_XY),
              brw_abs(brw_swizzle(t_nopersp, 0, 2, 0, 0)),
              brw_abs(brw_swizzle(t_nopersp, 1, 3, 0, 0)));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* If the points are in the same place, just substitute a
       * value to avoid divide-by-zero
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
              vec1(t_nopersp),
              brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      brw_MOV(p, t_nopersp, brw_imm_vf4(brw_float_to_vf(1.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0)));
      brw_ENDIF(p);
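      /* With the substituted value (1, 0, 0, 0) the division below gives
       * t_nopersp.y / t_nopersp.x = 0, i.e. the new vertex is treated as
       * coincident with v0.
       */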

      /* Now compute t_nopersp = t_nopersp.y/t_nopersp.x and broadcast it. */
      brw_math_invert(p, get_element(t_nopersp, 0), get_element(t_nopersp, 0));
      brw_MUL(p, vec1(t_nopersp), vec1(t_nopersp),
            vec1(suboffset(t_nopersp, 1)));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p, t_nopersp, brw_swizzle(t_nopersp, 0, 0, 0, 0));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      release_tmp(c, tmp);
      release_tmp(c, v0_ndc_copy);
   }

   /* Now we can iterate over each attribute
    * (could be done in pairs?)
    */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      /* HPOS, NDC already handled above */
      if (varying == VARYING_SLOT_POS || varying == BRW_VARYING_SLOT_NDC)
         continue;


      if (varying == VARYING_SLOT_EDGE) {
	 if (force_edgeflag)
	    brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
	 else
	    brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ) {
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.
          */
      } else if (varying < VARYING_SLOT_MAX) {
	 /* This is a true vertex result (and not a special value for the VUE
	  * header), so interpolate:
	  *
	  *        New = attr0 + t*attr1 - t*attr0
          *
          * Unless the attribute is flat shaded -- in which case just copy
          * from one of the sources (doesn't matter which; already copied from pv)
	  */
         GLuint interp = c->key.interpolation_mode.mode[slot];

         if (interp != INTERP_QUALIFIER_FLAT) {
            struct brw_reg tmp = get_tmp(c);
            struct brw_reg t =
               interp == INTERP_QUALIFIER_NOPERSPECTIVE ? t_nopersp : t0;

            brw_MUL(p,
                  vec4(brw_null_reg()),
                  deref_4f(v1_ptr, delta),
                  t);

            brw_MAC(p,
                  tmp,
                  negate(deref_4f(v0_ptr, delta)),
                  t);

            brw_ADD(p,
                  deref_4f(dest_ptr, delta),
                  deref_4f(v0_ptr, delta),
                  tmp);

            release_tmp(c, tmp);
         }
         else {
            brw_MOV(p,
                  deref_4f(dest_ptr, delta),
                  deref_4f(v0_ptr, delta));
         }
      }
   }

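   /* The URB is written in 256-bit (two-slot) units; if the slot count is
    * odd, zero the pad slot so the final write doesn't carry junk.
    */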
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   if (c->has_noperspective_shading)
      release_tmp(c, t_nopersp);
}
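For orientation only, here is a minimal scalar C sketch of the blend the routine above encodes, assuming hypothetical vec4f/lerp4/noperspective_t helpers that are not part of the driver: non-flat attributes are lerped with the clip-space t0, while noperspective attributes use a screen-space t derived from NDC Manhattan distances.

/* Minimal scalar sketch (hypothetical types and helpers, not driver code). */
#include <math.h>

struct vec4f { float x, y, z, w; };

static struct vec4f lerp4(struct vec4f a, struct vec4f b, float t)
{
   struct vec4f r = {
      a.x + t * (b.x - a.x),
      a.y + t * (b.y - a.y),
      a.z + t * (b.z - a.z),
      a.w + t * (b.w - a.w),
   };
   return r;
}

/* Screen-space t: ratio of |dx| + |dy| sums in NDC, mirroring t_nopersp. */
static float noperspective_t(struct vec4f v0_ndc, struct vec4f v1_ndc,
                             struct vec4f new_ndc)
{
   float d_full = fabsf(v1_ndc.x - v0_ndc.x) + fabsf(v1_ndc.y - v0_ndc.y);
   float d_new  = fabsf(new_ndc.x - v0_ndc.x) + fabsf(new_ndc.y - v0_ndc.y);

   /* Same guard as the CMP/IF above: coincident points get t = 0. */
   return d_full == 0.0f ? 0.0f : d_new / d_full;
}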
Example #30
0
/***********************************************************************
 * Output clipped polygon as an unfilled primitive:
 */
static void emit_lines(struct brw_clip_compile *c,
		       bool do_offset)
{
   struct brw_compile *p = &c->func;
   const struct brw_context *brw = p->brw;
   struct brw_indirect v0 = brw_indirect(0, 0);
   struct brw_indirect v1 = brw_indirect(1, 0);
   struct brw_indirect v0ptr = brw_indirect(2, 0);
   struct brw_indirect v1ptr = brw_indirect(3, 0);

   /* Need a separate loop for offset:
    */
   if (do_offset) {
      brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
      brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));

      brw_DO(p, BRW_EXECUTE_1);
      {
	 brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
	 brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));
	
	 apply_one_offset(c, v0);
	
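	 /* The ADD below decrements the per-vertex counter; its conditional
	  * modifier feeds the predicated WHILE, so the loop runs once per
	  * vertex in the inlist.
	  */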
	 brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
         brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_G);
      }
      brw_WHILE(p);
      brw_inst_set_pred_control(brw, brw_last_inst, BRW_PREDICATE_NORMAL);
   }

   /* v1ptr = &inlist[nr_verts]
    * *v1ptr = v0
    */
   brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
   brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
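   /* inlist entries are 16-bit offsets, so adding nr_verts twice advances
    * v1ptr by nr_verts entries; storing inlist[0] there closes the polygon
    * and lets the loop below emit the wrap-around edge.
    */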
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v0ptr), retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v1ptr), retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
   brw_MOV(p, deref_1uw(v1ptr, 0), deref_1uw(v0ptr, 0));

   brw_DO(p, BRW_EXECUTE_1);
   {
      brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
      brw_MOV(p, get_addr_reg(v1), deref_1uw(v0ptr, 2));
      brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));

      /* draw edge if edgeflag != 0 */
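      /* Each flagged edge is emitted as an independent two-vertex line
       * strip: v0 carries URB_WRITE_PRIM_START and v1 carries
       * URB_WRITE_PRIM_END.
       */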
      brw_CMP(p,
	      vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
	      deref_1f(v0, brw_varying_to_offset(&c->vue_map,
                                                 VARYING_SLOT_EDGE)),
	      brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      {
	 brw_clip_emit_vue(c, v0, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                           (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
                           | URB_WRITE_PRIM_START);
	 brw_clip_emit_vue(c, v1, BRW_URB_WRITE_ALLOCATE_COMPLETE,
                           (_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
                           | URB_WRITE_PRIM_END);
      }
      brw_ENDIF(p);

      brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
      brw_inst_set_cond_modifier(brw, brw_last_inst, BRW_CONDITIONAL_NZ);
   }
   brw_WHILE(p);
   brw_inst_set_pred_control(brw, brw_last_inst, BRW_PREDICATE_NORMAL);
}
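And a rough scalar equivalent of the emission loop above, with a hypothetical clip_vert type and emit_line() callback standing in for the URB writes; it sketches the control flow only and is not driver code. The inlist is closed by copying its first entry into the slot after the last one, and every edge whose leading vertex has a nonzero edge flag is emitted as its own two-vertex line.

/* Scalar sketch (hypothetical clip_vert and emit_line(); not driver code). */
struct clip_vert {
   float edgeflag;               /* VARYING_SLOT_EDGE value */
   /* position and other attributes omitted */
};

static void emit_lines_sketch(const struct clip_vert *verts,
                              unsigned short *inlist,   /* room for nr_verts + 1 */
                              unsigned nr_verts,
                              void (*emit_line)(const struct clip_vert *v0,
                                                const struct clip_vert *v1))
{
   /* Close the polygon the same way the code above does, so the loop
    * also emits the wrap-around edge.
    */
   inlist[nr_verts] = inlist[0];

   for (unsigned i = 0; i < nr_verts; i++) {
      const struct clip_vert *v0 = &verts[inlist[i]];
      const struct clip_vert *v1 = &verts[inlist[i + 1]];

      /* Draw the edge only when v0's edge flag is nonzero. */
      if (v0->edgeflag != 0.0f)
         emit_line(v0, v1);
   }
}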