/* Emit a SEND performing a render-target write through the dataport.
 *
 * dest/src0 describe the destination and payload registers, msg_reg_nr
 * selects the first message register, and eot marks the final message of
 * the thread.  The message is always SIMD16 single-source with the pixel
 * scoreboard bit set.
 */
void brw_fb_WRITE(struct brw_compile *p,
                  struct brw_reg dest,
                  GLuint msg_reg_nr,
                  struct brw_reg src0,
                  GLuint binding_table_index,
                  GLuint msg_length,
                  GLuint response_length,
                  GLboolean eot)
{
   struct brw_instruction *send = next_insn(p, BRW_OPCODE_SEND);

   send->header.predicate_control = 0; /* XXX */
   send->header.compression_control = BRW_COMPRESSION_NONE;
   send->header.destreg__conditionalmod = msg_reg_nr;

   brw_set_dest(send, dest);
   brw_set_src0(send, src0);
   brw_set_dp_write_message(p->brw,
                            send,
                            binding_table_index,
                            BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE, /* msg_control */
                            BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE, /* msg_type */
                            msg_length,
                            1, /* pixel scoreboard */
                            response_length,
                            eot);
}
static void brw_fb_write(struct brw_compile *p, int dw) { struct brw_instruction *insn; unsigned msg_control, msg_type, msg_len; struct brw_reg src0; bool header; if (dw == 16) { brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED); msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE; msg_len = 8; } else { brw_set_compression_control(p, BRW_COMPRESSION_NONE); msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01; msg_len = 4; } if (p->gen < 060) { brw_push_insn_state(p); brw_set_compression_control(p, BRW_COMPRESSION_NONE); brw_set_mask_control(p, BRW_MASK_DISABLE); brw_MOV(p, brw_message_reg(1), brw_vec8_grf(1, 0)); brw_pop_insn_state(p); msg_len += 2; } /* The execution mask is ignored for render target writes. */ insn = brw_next_insn(p, BRW_OPCODE_SEND); insn->header.predicate_control = 0; insn->header.compression_control = BRW_COMPRESSION_NONE; if (p->gen >= 060) { msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE; src0 = brw_message_reg(2); header = false; } else { insn->header.destreg__conditionalmod = 0; msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE; src0 = __retype_uw(brw_vec8_grf(0, 0)); header = true; } brw_set_dest(p, insn, null_result(dw)); brw_set_src0(p, insn, src0); brw_set_dp_write_message(p, insn, 0, msg_control, msg_type, msg_len, header, true, 0, true, false); }
/**
 * Write a block of 16 dwords/floats to the data port Render Cache
 * scratch buffer (used for register spilling).
 *
 * Scratch offset should be a multiple of 64.
 */
void brw_dp_WRITE_16(struct brw_compile *p,
                     struct brw_reg src,
                     GLuint scratch_offset)
{
   const GLuint msg_reg_nr = 1;

   /* Set the message-header global offset field (reg 0, element 2). */
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_MOV(p,
           retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_D),
           brw_imm_d(scratch_offset));
   brw_pop_insn_state(p);

   {
      struct brw_reg dest = retype(brw_null_reg(), BRW_REGISTER_TYPE_UW);
      struct brw_instruction *send = next_insn(p, BRW_OPCODE_SEND);

      send->header.predicate_control = 0; /* XXX */
      send->header.compression_control = BRW_COMPRESSION_NONE;
      send->header.destreg__conditionalmod = msg_reg_nr;

      brw_set_dest(send, dest);
      brw_set_src0(send, src);
      brw_set_dp_write_message(p->brw,
                               send,
                               255, /* binding table index (255=stateless) */
                               BRW_DATAPORT_OWORD_BLOCK_4_OWORDS, /* msg_control */
                               BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE, /* msg_type */
                               3, /* msg_length */
                               0, /* pixel scoreboard */
                               0, /* response_length */
                               0); /* eot */
   }
}
/* Spill a vec4 register to the scratch buffer with an OWord dual-block
 * write (stateless access, binding table index 255).
 *
 * Payload layout: m(base_mrf) = header, m(base_mrf + 1) = per-channel
 * offsets, m(base_mrf + 2) = the data being written.  Predication, when
 * present on the instruction, is applied to the SEND itself and never to
 * the header setup.
 */
void
vec4_generator::generate_scratch_write(vec4_instruction *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg index)
{
   struct brw_reg hdr = brw_vec8_grf(0, 0);
   bool send_commit;
   uint32_t msg;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &hdr, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   /* Pick the generation-specific message-type encoding. */
   if (brw->gen >= 7)
      msg = GEN7_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else if (brw->gen == 6)
      msg = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (brw->gen >= 6) {
      send_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      send_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, hdr);
   if (brw->gen < 6)
      insn->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, insn,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            send_commit, /* rlen */
                            false, /* eot */
                            send_commit);
}