Example No. 1
void
gen8_vec4_generator::generate_scratch_write(vec4_instruction *ir,
                                            struct brw_reg dst,
                                            struct brw_reg src,
                                            struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(GEN7_MRF_HACK_START + ir->base_mrf, 0);

   MOV_RAW(header, brw_vec8_grf(0, 0));

   generate_oword_dual_block_offsets(brw_message_reg(ir->base_mrf + 1), index);

   MOV(retype(brw_message_reg(ir->base_mrf + 2), BRW_REGISTER_TYPE_D),
       retype(src, BRW_REGISTER_TYPE_D));

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   gen8_instruction *send = next_inst(BRW_OPCODE_SEND);
   gen8_set_dst(brw, send, dst);
   gen8_set_src0(brw, send, header);
   gen8_set_pred_control(send, ir->predicate);
   gen8_set_dp_message(brw, send, GEN7_SFID_DATAPORT_DATA_CACHE,
                       255, /* binding table index: stateless access */
                       GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE,
                       BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                       3,      /* mlen */
                       0,      /* rlen */
                       true,   /* header present */
                       false); /* EOT */
}
Example No. 2
static void wm_src_affine(struct brw_compile *p)
{
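	/* Each brw_PLN evaluates a plane equation (coefficients in g6,
	 * per-pixel deltas in g2/g4) and writes an interpolated source
	 * coordinate into a message register (m2-m5).
	 */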
	brw_PLN(p, brw_message_reg(2), brw_vec1_grf(6,0), brw_vec8_grf(2,0));
	brw_PLN(p, brw_message_reg(3), brw_vec1_grf(6,0), brw_vec8_grf(4,0));
	brw_PLN(p, brw_message_reg(4), brw_vec1_grf(6,4), brw_vec8_grf(2,0));
	brw_PLN(p, brw_message_reg(5), brw_vec1_grf(6,4), brw_vec8_grf(4,0));
}
Example No. 3
static void brw_wm_xy(struct brw_compile *p, int dw)
{
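	/* Compute per-pixel X/Y: r1 holds the 2x2 subspan origins as packed
	 * words, and the immediate vectors add the within-subspan offsets
	 * (0,1,0,1,... for X; 0,0,1,1,... for Y).
	 */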
	struct brw_reg r1 = brw_vec1_grf(1, 0);
	struct brw_reg r1_uw = __retype_uw(r1);
	struct brw_reg x_uw, y_uw;

	brw_set_compression_control(p, BRW_COMPRESSION_NONE);

	if (dw == 16) {
		x_uw = brw_uw16_grf(30, 0);
		y_uw = brw_uw16_grf(28, 0);
	} else {
		x_uw = brw_uw8_grf(30, 0);
		y_uw = brw_uw8_grf(28, 0);
	}

	brw_ADD(p,
		x_uw,
		__stride(__suboffset(r1_uw, 4), 2, 4, 0),
		brw_imm_v(0x10101010));
	brw_ADD(p,
		y_uw,
		__stride(__suboffset(r1_uw, 5), 2, 4, 0),
		brw_imm_v(0x11001100));

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	brw_ADD(p, brw_vec8_grf(X16, 0), vec8(x_uw), brw_negate(r1));
	brw_ADD(p, brw_vec8_grf(Y16, 0), vec8(y_uw), brw_negate(__suboffset(r1, 1)));
}
Example No. 4
static void fire_fb_write( struct brw_wm_compile *c,
			   GLuint base_reg,
			   GLuint nr,
			   GLuint target,
			   GLuint eot )
{
   struct brw_compile *p = &c->func;
   
   /* Pass through control information:
    */
/*  mov (8) m1.0<1>:ud   r1.0<8;8,1>:ud   { Align1 NoMask } */
   {
      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE); /* ? */
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, 
	       brw_message_reg(base_reg + 1),
	       brw_vec8_grf(1, 0));
      brw_pop_insn_state(p);
   }

   /* Send framebuffer write message: */
/*  send (16) null.0<1>:uw m0               r0.0<8;8,1>:uw   0x85a04000:ud    { Align1 EOT } */
   brw_fb_WRITE(p,
		retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW),
		base_reg,
		retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
		target,		
		nr,
		0, 
		eot);
}
Example No. 5
void
gen8_vec4_generator::generate_urb_write(vec4_instruction *ir, bool vs)
{
   struct brw_reg header = brw_vec8_grf(GEN7_MRF_HACK_START + ir->base_mrf, 0);

   /* Copy g0. */
   if (vs)
      MOV_RAW(header, brw_vec8_grf(0, 0));

   gen8_instruction *inst;
   if (!(ir->urb_write_flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
      /* Enable Channel Masks in the URB_WRITE_OWORD message header */
      default_state.access_mode = BRW_ALIGN_1;
      inst = OR(retype(brw_vec1_grf(GEN7_MRF_HACK_START + ir->base_mrf, 5),
                       BRW_REGISTER_TYPE_UD),
                retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
                brw_imm_ud(0xff00));
      gen8_set_mask_control(inst, BRW_MASK_DISABLE);
      default_state.access_mode = BRW_ALIGN_16;
   }

   inst = next_inst(BRW_OPCODE_SEND);
   gen8_set_urb_message(brw, inst, ir->urb_write_flags, ir->mlen, 0, ir->offset,
                        true);
   gen8_set_dst(brw, inst, brw_null_reg());
   gen8_set_src0(brw, inst, header);
}
Example No. 6
/**
 * Allocate registers for GS.
 *
 * If sol_program is true, then:
 *
 * - The thread will be spawned with the "SVBI Payload Enable" bit set, so GRF
 *   1 needs to be set aside to hold the streamed vertex buffer indices.
 *
 * - The thread will need to use the destination_indices register.
 */
static void brw_gs_alloc_regs( struct brw_gs_compile *c,
			       GLuint nr_verts,
                               bool sol_program )
{
   GLuint i = 0, j;

   /* Register usage is static, precompute here:
    */
   c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;

   /* Streamed vertex buffer indices */
   if (sol_program)
      c->reg.SVBI = retype(brw_vec8_grf(i++, 0), BRW_REGISTER_TYPE_UD);

   /* Payload vertices plus space for more generated vertices:
    */
   for (j = 0; j < nr_verts; j++) {
      c->reg.vertex[j] = brw_vec4_grf(i, 0);
      i += c->nr_regs;
   }

   c->reg.header = retype(brw_vec8_grf(i++, 0), BRW_REGISTER_TYPE_UD);
   c->reg.temp = retype(brw_vec8_grf(i++, 0), BRW_REGISTER_TYPE_UD);

   if (sol_program) {
      c->reg.destination_indices =
         retype(brw_vec4_grf(i++, 0), BRW_REGISTER_TYPE_UD);
   }

   c->prog_data.urb_read_length = c->nr_regs; 
   c->prog_data.total_grf = i;
}
Example No. 7
static void
gen_ADD_GRF_GRF_IMM(struct brw_codegen *p)
{
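   /* Emit g0 = g2 + 1.0f, exercising a float immediate source. */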
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);

   brw_ADD(p, g0, g2, brw_imm_f(1.0));
}
Example No. 8
static void
gen_ADD_GRF_GRF_IMM_d(struct brw_codegen *p)
{
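   /* The same ADD, but with signed dword (D) types and an integer immediate. */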
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_D);
   struct brw_reg g2 = retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_D);

   brw_ADD(p, g0, g2, brw_imm_d(1));
}
Example No. 9
static void
gen_MOV_GRF_GRF(struct brw_codegen *p)
{
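   /* Plain register-to-register move: g0 = g2. */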
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);

   brw_MOV(p, g0, g2);
}
Example No. 10
static void
gen_ADD_GRF_GRF_GRF(struct brw_codegen *p)
{
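   /* g0 = g2 + g4, with all operands in the GRF. */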
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);
   struct brw_reg g4 = brw_vec8_grf(4, 0);

   brw_ADD(p, g0, g2, g4);
}
Example No. 11
static void brw_clip_line_alloc_regs( struct brw_clip_compile *c )
{
   const struct gen_device_info *devinfo = c->func.devinfo;
   GLuint i = 0, j;

   /* Register usage is static, precompute here:
    */
   c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;

   if (c->key.nr_userclip) {
      c->reg.fixed_planes = brw_vec4_grf(i, 0);
      i += (6 + c->key.nr_userclip + 1) / 2;

      c->prog_data.curb_read_length = (6 + c->key.nr_userclip + 1) / 2;
   }
   else
      c->prog_data.curb_read_length = 0;


   /* Payload vertices plus space for more generated vertices:
    */
   for (j = 0; j < 4; j++) {
      c->reg.vertex[j] = brw_vec4_grf(i, 0);
      i += c->nr_regs;
   }

   c->reg.t           = brw_vec1_grf(i, 0);
   c->reg.t0          = brw_vec1_grf(i, 1);
   c->reg.t1          = brw_vec1_grf(i, 2);
   c->reg.planemask   = retype(brw_vec1_grf(i, 3), BRW_REGISTER_TYPE_UD);
   c->reg.plane_equation = brw_vec4_grf(i, 4);
   i++;

   c->reg.dp0         = brw_vec1_grf(i, 0); /* fixme - dp4 will clobber r.1,2,3 */
   c->reg.dp1         = brw_vec1_grf(i, 4);
   i++;

   if (!c->key.nr_userclip) {
      c->reg.fixed_planes = brw_vec8_grf(i, 0);
      i++;
   }

   c->reg.vertex_src_mask = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
   c->reg.clipdistance_offset = retype(brw_vec1_grf(i, 1), BRW_REGISTER_TYPE_W);
   i++;

   if (devinfo->gen == 5) {
      c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
      i++;
   }

   c->first_tmp = i;
   c->last_tmp = i;

   c->prog_data.urb_read_length = c->nr_regs; /* ? */
   c->prog_data.total_grf = i;
}
Example No. 12
static void
gen_ADD_MRF_GRF_GRF(struct brw_codegen *p)
{
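   /* As above, but the destination is message register m6 rather than a GRF. */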
   struct brw_reg m6 = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 6, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);
   struct brw_reg g4 = brw_vec8_grf(4, 0);

   brw_ADD(p, m6, g2, g4);
}
Example No. 13
static void
gen_f0_0_MOV_GRF_GRF(struct brw_codegen *p)
{
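   /* Predicated MOV using the default flag subregister f0.0. */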
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);

   brw_push_insn_state(p);
   brw_set_default_predicate_control(p, true);
   brw_MOV(p, g0, g2);
   brw_pop_insn_state(p);
}
Example No. 14
/* The handling of f0.1 vs f0.0 changes between gen6 and gen7.  Explicitly test
 * it, so that the fuzzing can run over all the other bits that might
 * interact with it.
 */
static void
gen_f0_1_MOV_GRF_GRF(struct brw_codegen *p)
{
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);

   brw_push_insn_state(p);
   brw_set_default_predicate_control(p, true);
   brw_inst *mov = brw_MOV(p, g0, g2);
   brw_inst_set_flag_subreg_nr(p->devinfo, mov, 1);
   brw_pop_insn_state(p);
}
Example No. 15
static void alloc_regs( struct brw_sf_compile *c )
{
    GLuint reg, i;

    /* Values computed by fixed function unit:
     */
    c->pv  = retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_D);
    c->det = brw_vec1_grf(1, 2);
    c->dx0 = brw_vec1_grf(1, 3);
    c->dx2 = brw_vec1_grf(1, 4);
    c->dy0 = brw_vec1_grf(1, 5);
    c->dy2 = brw_vec1_grf(1, 6);

    /* z and 1/w passed in separately:
     */
    c->z[0]     = brw_vec1_grf(2, 0);
    c->inv_w[0] = brw_vec1_grf(2, 1);
    c->z[1]     = brw_vec1_grf(2, 2);
    c->inv_w[1] = brw_vec1_grf(2, 3);
    c->z[2]     = brw_vec1_grf(2, 4);
    c->inv_w[2] = brw_vec1_grf(2, 5);

    /* The vertices:
     */
    reg = 3;
    for (i = 0; i < c->nr_verts; i++) {
        c->vert[i] = brw_vec8_grf(reg, 0);
        reg += c->nr_attr_regs;
    }

    /* Temporaries, allocated after last vertex reg.
     */
    c->inv_det = brw_vec1_grf(reg, 0);
    reg++;
    c->a1_sub_a0 = brw_vec8_grf(reg, 0);
    reg++;
    c->a2_sub_a0 = brw_vec8_grf(reg, 0);
    reg++;
    c->tmp = brw_vec8_grf(reg, 0);
    reg++;

    /* Note grf allocation:
     */
    c->prog_data.total_grf = reg;


    /* Outputs of this program - interpolation coefficients for
     * rasterization:
     */
    c->m1Cx = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 1, 0);
    c->m2Cy = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 2, 0);
    c->m3C0 = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 3, 0);
}
Example No. 16
/* The handling of f0.1 vs f0.0 changes between gen6 and gen7.  Explicitly test
 * it, so that the fuzzing can run over all the other bits that might
 * interact with it.
 */
static void
gen_f0_1_MOV_GRF_GRF(struct brw_compile *p)
{
   struct brw_reg g0 = brw_vec8_grf(0, 0);
   struct brw_reg g2 = brw_vec8_grf(2, 0);

   brw_push_insn_state(p);
   brw_set_predicate_control(p, true);
   struct brw_instruction *mov = brw_MOV(p, g0, g2);
   mov->bits2.da1.flag_subreg_nr = 1;
   brw_pop_insn_state(p);
}
Example No. 17
static void prealloc_reg(struct brw_wm_compile *c)
{
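    /* Fixed register layout: depth payload first, then pushed program
     * parameters (curbe), then a pair of interpolation setup registers per
     * active input, and finally the execution-mask and stack registers.
     */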
    int i, j;
    struct brw_reg reg;
    int nr_interp_regs = 0;
    GLuint inputs = FRAG_BIT_WPOS | c->fp_interp_emitted | c->fp_deriv_emitted;

    for (i = 0; i < 4; i++) {
	reg = (i < c->key.nr_depth_regs) 
	    ? brw_vec8_grf(i*2, 0) : brw_vec8_grf(0, 0);
	set_reg(c, PROGRAM_PAYLOAD, PAYLOAD_DEPTH, i, reg);
    }
    c->reg_index += 2*c->key.nr_depth_regs;
    {
	int nr_params = c->fp->program.Base.Parameters->NumParameters;
	struct gl_program_parameter_list *plist = 
	    c->fp->program.Base.Parameters;
	int index = 0;
	c->prog_data.nr_params = 4*nr_params;
	for (i = 0; i < nr_params; i++) {
	    for (j = 0; j < 4; j++, index++) {
		reg = brw_vec1_grf(c->reg_index + index/8, 
			index%8);
		c->prog_data.param[index] = 
		    &plist->ParameterValues[i][j];
		set_reg(c, PROGRAM_STATE_VAR, i, j, reg);
	    }
	}
	c->nr_creg = 2*((4*nr_params+15)/16);
	c->reg_index += c->nr_creg;
    }
    for (i = 0; i < FRAG_ATTRIB_MAX; i++) {
	if (inputs & (1<<i)) {
	    nr_interp_regs++;
	    reg = brw_vec8_grf(c->reg_index, 0);
	    for (j = 0; j < 4; j++)
		set_reg(c, PROGRAM_PAYLOAD, i, j, reg);
	    c->reg_index += 2;

	}
    }
    c->prog_data.first_curbe_grf = c->key.nr_depth_regs * 2;
    c->prog_data.urb_read_length = nr_interp_regs * 2;
    c->prog_data.curb_read_length = c->nr_creg;
    c->emit_mask_reg = brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, c->reg_index, 0);
    c->reg_index++;
    c->stack =  brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, c->reg_index, 0);
    c->reg_index += 2;
}
Example No. 18
static void brw_fb_write(struct brw_compile *p, int dw)
{
	struct brw_instruction *insn;
	unsigned msg_control, msg_type, msg_len;
	struct brw_reg src0;
	bool header;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
		msg_len = 8;
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
		msg_len = 4;
	}

	if (p->gen < 060) {
		brw_push_insn_state(p);
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		brw_set_mask_control(p, BRW_MASK_DISABLE);
		brw_MOV(p, brw_message_reg(1), brw_vec8_grf(1, 0));
		brw_pop_insn_state(p);

		msg_len += 2;
	}

	/* The execution mask is ignored for render target writes. */
	insn = brw_next_insn(p, BRW_OPCODE_SEND);
	insn->header.predicate_control = 0;
	insn->header.compression_control = BRW_COMPRESSION_NONE;

	if (p->gen >= 060) {
		msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		src0 = brw_message_reg(2);
		header = false;
	} else {
		insn->header.destreg__conditionalmod = 0;
		msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		src0 = __retype_uw(brw_vec8_grf(0, 0));
		header = true;
	}

	brw_set_dest(p, insn, null_result(dw));
	brw_set_src0(p, insn, src0);
	brw_set_dp_write_message(p, insn, 0,
				 msg_control, msg_type, msg_len,
				 header, true, 0, true, false);
}
Example No. 19
static void alloc_contiguous_dest(struct brw_wm_compile *c, 
				  struct brw_wm_value *dst[],
				  GLuint nr,
				  GLuint thisinsn)
{
   GLuint reg = search_contiguous_regs(c, nr, thisinsn);
   GLuint i;

   for (i = 0; i < nr; i++) {
      if (!dst[i]) {
	 /* Need to grab a dummy value in TEX case.  Don't introduce
	  * it into the tracking scheme.
	  */
	 dst[i] = &c->vreg[c->nr_vreg++];
      }
      else {
	 assert(!dst[i]->resident);
	 assert(c->pass2_grf[reg+i].nextuse != thisinsn);

	 c->pass2_grf[reg+i].value = dst[i];
	 c->pass2_grf[reg+i].nextuse = thisinsn;

	 dst[i]->resident = &c->pass2_grf[reg+i];
      }

      dst[i]->hw_reg = brw_vec8_grf((reg+i)*2, 0);
   }

   if ((reg+nr)*2 > c->max_wm_grf)
      c->max_wm_grf = (reg+nr) * 2;
}
Example No. 20
void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex.  Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_end();
   inst = emit(GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = 1;
}
Example No. 21
static struct brw_reg 
get_reg(struct brw_wm_compile *c, int file, int index, int component, int nr, GLuint neg, GLuint abs)
{
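    /* Return the hardware register backing (file, index, component),
     * allocating a fresh GRF on first use, then apply any negate/abs
     * source modifiers.
     */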
    struct brw_reg reg;
    switch (file) {
	case PROGRAM_STATE_VAR:
	case PROGRAM_CONSTANT:
	case PROGRAM_UNIFORM:
	    file = PROGRAM_STATE_VAR;
	    break;
	case PROGRAM_UNDEFINED:
	    return brw_null_reg();	
	default:
	    break;
    }

    if(c->wm_regs[file][index][component].inited)
	reg = c->wm_regs[file][index][component].reg;
    else 
	reg = brw_vec8_grf(c->reg_index, 0);

    if(!c->wm_regs[file][index][component].inited) {
	set_reg(c, file, index, component, reg);
	c->reg_index++;
    }

    if (neg & (1<< component)) {
	reg = negate(reg);
    }
    if (abs)
	reg = brw_abs(reg);
    return reg;
}
Example No. 22
static int brw_wm_sample__alpha(struct brw_compile *p, int dw,
				int channel, int msg, int result)
{
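	/* Sample with writemask W so that only the alpha channel is returned. */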
	struct brw_reg src0;
	int mlen, rlen;

	if (dw == 8) {
		/* SIMD8 sample return is not masked */
		mlen = 3;
		rlen = 4;
	} else {
		mlen = 5;
		rlen = 2;
	}

	if (p->gen >= 060)
		src0 = brw_message_reg(msg);
	else
		src0 = brw_vec8_grf(0, 0);

	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
		   channel+1, channel, WRITEMASK_W, 0,
		   rlen, mlen, true, simd(dw));

	if (dw == 8)
		result += 3;

	return result;
}
Example No. 23
void emit_linterp(struct brw_compile *p,
		  const struct brw_reg *dst,
		  GLuint mask,
		  const struct brw_reg *arg0,
		  const struct brw_reg *deltas)
{
   struct intel_context *intel = &p->brw->intel;
   struct brw_reg interp[4];
   GLuint nr = arg0[0].nr;
   GLuint i;
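   /* interp[0..3] pick up the per-channel plane-equation coefficients,
    * stored as two vec4s in g[nr] and g[nr+1].
    */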

   interp[0] = brw_vec1_grf(nr, 0);
   interp[1] = brw_vec1_grf(nr, 4);
   interp[2] = brw_vec1_grf(nr+1, 0);
   interp[3] = brw_vec1_grf(nr+1, 4);

   for (i = 0; i < 4; i++) {
      if (mask & (1<<i)) {
	 if (intel->gen >= 6) {
	    brw_PLN(p, dst[i], interp[i], brw_vec8_grf(2, 0));
	 } else if (can_do_pln(intel, deltas)) {
	    brw_PLN(p, dst[i], interp[i], deltas[0]);
	 } else {
	    brw_LINE(p, brw_null_reg(), interp[i], deltas[0]);
	    brw_MAC(p, dst[i], suboffset(interp[i],1), deltas[1]);
	 }
      }
   }
}
Example No. 24
void
brw_blorp_const_color_program::alloc_regs()
{
   int reg = 0;
   this->R0 = retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW);
   this->R1 = retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW);

   prog_data.first_curbe_grf = reg;
   clear_rgba = retype(brw_vec4_grf(reg++, 0), BRW_REGISTER_TYPE_F);
   reg += BRW_BLORP_NUM_PUSH_CONST_REGS;

   /* Make sure we didn't run out of registers */
   assert(reg <= GEN7_MRF_HACK_START);

   this->base_mrf = 2;
}
Example No. 25
void
vec4_generator::generate_pull_constant_load(vec4_instruction *inst,
                                            struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset)
{
   assert(index.file == BRW_IMMEDIATE_VALUE &&
	  index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   if (intel->gen == 7) {
      gen6_resolve_implied_move(p, &offset, inst->base_mrf);
      brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              1, /* mlen */
                              false, /* no header */
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
      return;
   }

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
	   offset);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
			   surf_index,
			   BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
			   msg_type,
			   BRW_DATAPORT_READ_TARGET_DATA_CACHE,
			   2, /* mlen */
			   1 /* rlen */);
}
Example No. 26
/** Init the "undef" register */
static void pass0_init_undef( struct brw_wm_compile *c)
{
   struct brw_wm_ref *ref = &c->undef_ref;
   ref->value = &c->undef_value;
   ref->hw_reg = brw_vec8_grf(0, 0);
   ref->insn = 0;
   ref->prevuse = NULL;
}
Example No. 27
/* Points setup - several simplifications as all attributes are
 * constant across the face of the point (point sprites excluded!)
 */
void brw_emit_point_setup(struct brw_sf_compile *c, bool allocate)
{
   struct brw_compile *p = &c->func;
   GLuint i;

   c->flag_value = 0xff;
   c->nr_verts = 1;

   if (allocate)
      alloc_regs(c);

   copy_z_inv_w(c);

   brw_MOV(p, c->m1Cx, brw_imm_ud(0)); /* zero - move out of loop */
   brw_MOV(p, c->m2Cy, brw_imm_ud(0)); /* zero - move out of loop */

   for (i = 0; i < c->nr_setup_regs; i++)
   {
      struct brw_reg a0 = offset(c->vert[0], i);
      GLushort pc, pc_persp, pc_linear;
      bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);

      if (pc_persp)
      {				
	 /* This seems odd as the values are all constant, but the
	  * fragment shader will be expecting it:
	  */
	 set_predicate_control_flag_value(p, c, pc_persp);
	 brw_MUL(p, a0, a0, c->inv_w[0]);
      }


      /* The delta values are always zero, just send the starting
       * coordinate.  Again, this is to fit in with the interpolation
       * code in the fragment shader.
       */
      {
	 set_predicate_control_flag_value(p, c, pc);

	 brw_MOV(p, c->m3C0, a0); /* constant value */

	 /* Copy m0..m3 to URB.
	  */
	 brw_urb_WRITE(p,
		       brw_null_reg(),
		       0,
		       brw_vec8_grf(0, 0),
                       last ? BRW_URB_WRITE_EOT_COMPLETE
                       : BRW_URB_WRITE_NO_FLAGS,
		       4, 	/* msg len */
		       0,	/* response len */
		       i*4,	/* urb destination offset */
		       BRW_URB_SWIZZLE_TRANSPOSE);
      }
   }

   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
}
Example No. 28
void
gen8_vec4_generator::generate_gs_set_vertex_count(struct brw_reg eot_mrf_header,
                                                  struct brw_reg src)
{
   /* Move the vertex count into the second MRF for the EOT write. */
   assert(eot_mrf_header.file == BRW_MESSAGE_REGISTER_FILE);
   int dst_nr = GEN7_MRF_HACK_START + eot_mrf_header.nr + 1;
   MOV(retype(brw_vec8_grf(dst_nr, 0), BRW_REGISTER_TYPE_UD), src);
}
Example No. 29
static void
gen_PLN_MRF_GRF_GRF(struct brw_codegen *p)
{
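   /* Evaluate the plane equation held in g2 over the pixel deltas in g4,
    * writing the result to message register m6.
    */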
   struct brw_reg m6 = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 6, 0);
   struct brw_reg interp = brw_vec1_grf(2, 0);
   struct brw_reg g4 = brw_vec8_grf(4, 0);

   brw_PLN(p, m6, interp, g4);
}
Example No. 30
static void fire_fb_write( struct brw_wm_compile *c,
			   GLuint base_reg,
			   GLuint nr,
			   GLuint target,
			   GLuint eot )
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;
   uint32_t msg_control;

   /* Pass through control information:
    * 
    * On Gen6, the m1 mov has already been done in emit_fb_write() for the SIMD16 case.
    */
/*  mov (8) m1.0<1>:ud   r1.0<8;8,1>:ud   { Align1 NoMask } */
   if (intel->gen < 6)
   {
      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE); /* ? */
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, 
	       brw_message_reg(base_reg + 1),
	       brw_vec8_grf(1, 0));
      brw_pop_insn_state(p);
   }

   if (c->dispatch_width == 16)
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
   else
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;

   /* Send framebuffer write message: */
/*  send (16) null.0<1>:uw m0               r0.0<8;8,1>:uw   0x85a04000:ud    { Align1 EOT } */
   brw_fb_WRITE(p,
		c->dispatch_width,
		base_reg,
		retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
		msg_control,
		target,		
		nr,
		0, 
		eot,
		true);
}