static void
fs_lower_opcode_kil(struct toy_compiler *tc, struct toy_inst *inst)
{
   struct toy_dst pixel_mask_dst;
   struct toy_src f0, pixel_mask;
   struct toy_inst *tmp;

   /* lower half of r1.7:ud */
   pixel_mask_dst = tdst_uw(tdst(TOY_FILE_GRF, 1, 7 * 4));
   pixel_mask = tsrc_rect(tsrc_from(pixel_mask_dst), TOY_RECT_010);

   f0 = tsrc_rect(tsrc_uw(tsrc(TOY_FILE_ARF, BRW_ARF_FLAG, 0)), TOY_RECT_010);

   /* KILP or KIL */
   if (tsrc_is_null(inst->src[0])) {
      struct toy_src dummy = tsrc_uw(tsrc(TOY_FILE_GRF, 0, 0));
      struct toy_dst f0_dst = tdst_uw(tdst(TOY_FILE_ARF, BRW_ARF_FLAG, 0));

      /* create a mask that masks out all pixels */
      tmp = tc_MOV(tc, f0_dst, tsrc_rect(tsrc_imm_uw(0xffff), TOY_RECT_010));
      tmp->exec_size = BRW_EXECUTE_1;
      tmp->mask_ctrl = BRW_MASK_DISABLE;

      tc_CMP(tc, tdst_null(), dummy, dummy, BRW_CONDITIONAL_NEQ);

      /* swapping the two src operands breaks glBitmap()!? */
      tmp = tc_AND(tc, pixel_mask_dst, f0, pixel_mask);
      tmp->exec_size = BRW_EXECUTE_1;
      tmp->mask_ctrl = BRW_MASK_DISABLE;
   }
   else {
      struct toy_src src[4];
      int i;

      tsrc_transpose(inst->src[0], src);

      /* mask out killed pixels */
      for (i = 0; i < 4; i++) {
         tc_CMP(tc, tdst_null(), src[i], tsrc_imm_f(0.0f),
               BRW_CONDITIONAL_GE);

         /* swapping the two src operands breaks glBitmap()!? */
         tmp = tc_AND(tc, pixel_mask_dst, f0, pixel_mask);
         tmp->exec_size = BRW_EXECUTE_1;
         tmp->mask_ctrl = BRW_MASK_DISABLE;
      }
   }

   tc_discard_inst(tc, inst);
}
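/*
 * Illustration only, not driver code: the net effect of the CMP/AND pairs
 * emitted above for KIL.  Each CMP leaves per-pixel results in f0 (bit set
 * when src[i] >= 0.0f, i.e. the pixel survives), and ANDing f0 into the
 * lower half of r1.7:ud permanently disables the killed pixels.  uint16_t
 * stands in for the 16-bit flag and dispatch-mask registers; <stdint.h> is
 * assumed.
 */
static uint16_t
kil_pixel_mask_sketch(uint16_t pixel_mask, const uint16_t cmp_ge[4])
{
   int i;

   /* mirror the four CMP/AND pairs: a pixel stays enabled only if it was
    * enabled and passed every component test */
   for (i = 0; i < 4; i++)
      pixel_mask &= cmp_ge[i];

   return pixel_mask;
}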
/**
 * Initialize the instruction template, from which tc_add() initializes the
 * newly added instructions.
 */
static void
tc_init_inst_templ(struct toy_compiler *tc)
{
   struct toy_inst *templ = &tc->templ;
   int i;

   templ->opcode = GEN6_OPCODE_NOP;
   templ->access_mode = GEN6_ALIGN_1;
   templ->mask_ctrl = GEN6_MASKCTRL_NORMAL;
   templ->dep_ctrl = GEN6_DEPCTRL_NORMAL;
   templ->qtr_ctrl = GEN6_QTRCTRL_1Q;
   templ->thread_ctrl = GEN6_THREADCTRL_NORMAL;
   templ->pred_ctrl = GEN6_PREDCTRL_NONE;
   templ->pred_inv = false;
   templ->exec_size = GEN6_EXECSIZE_1;
   templ->cond_modifier = GEN6_COND_NORMAL;
   templ->acc_wr_ctrl = false;
   templ->saturate = false;

   templ->marker = false;

   templ->dst = tdst_null();
   for (i = 0; i < Elements(templ->src); i++)
      templ->src[i] = tsrc_null();

   for (i = 0; i < Elements(templ->tex.offsets); i++)
      templ->tex.offsets[i] = tsrc_null();

   list_inithead(&templ->list);
}
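/*
 * A minimal sketch, not the real tc_add(): per the comment above, each
 * newly added instruction starts life as a copy of the template, so state
 * set on tc->templ (exec_size, predication, and so on) applies to every
 * instruction emitted afterwards.  The allocator and the instruction-list
 * field name below are assumptions for illustration.
 */
static struct toy_inst *
tc_add_sketch(struct toy_compiler *tc)
{
   struct toy_inst *inst = MALLOC_STRUCT(toy_inst); /* assumed allocator */

   if (!inst)
      return NULL;

   *inst = tc->templ;                            /* copy the template */
   list_addtail(&inst->list, &tc->instructions); /* assumed field name */

   return inst;
}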
static void
gs_write_vue(struct gs_compile_context *gcc,
             struct toy_dst dst, struct toy_src msg_header,
             const struct toy_src *outs, int num_outs,
             bool eot)
{
   struct toy_compiler *tc = &gcc->tc;
   struct toy_dst mrf_header;
   struct toy_src desc;
   int sent = 0;

   mrf_header = tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0));
   gs_COPY8(tc, mrf_header, msg_header);

   while (sent < num_outs) {
      const int mrf = gcc->first_free_mrf + 1;
      const int mrf_avail = gcc->last_free_mrf - mrf + 1;
      int msg_len, num_entries, i;
      bool complete;

      /* each MRF holds a pair of outputs, 4 dwords each */
      num_entries = (num_outs - sent + 1) / 2;
      complete = true;
      if (num_entries > mrf_avail) {
         num_entries = mrf_avail;
         complete = false;
      }

      for (i = 0; i < num_entries; i++) {
         gs_COPY4(tc, tdst(TOY_FILE_MRF, mrf + i, 0), 0,
               outs[sent + 2 * i], 0);
         /* guard against an odd trailing output */
         if (sent + 2 * i + 1 < num_outs) {
            gs_COPY4(tc, tdst(TOY_FILE_MRF, mrf + i, 0), 4,
                  outs[sent + 2 * i + 1], 0);
         }
      }

      /* do not forget the header */
      msg_len = num_entries + 1;

      if (complete) {
         desc = tsrc_imm_mdesc_urb(tc, eot, msg_len, !eot,
               true, true, !eot, false, sent, 0);
      }
      else {
         desc = tsrc_imm_mdesc_urb(tc, false, msg_len, 0,
               false, true, false, false, sent, 0);
      }

      tc_add2(tc, TOY_OPCODE_URB_WRITE,
            (complete) ? dst : tdst_null(), tsrc_from(mrf_header), desc);

      sent += num_entries * 2;
   }
}
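/*
 * Standalone sketch (illustration only) of the splitting math above: two
 * VUE outputs share one MRF, the header takes one extra register, and when
 * the pairs do not all fit in the free MRFs the write is split into
 * multiple URB_WRITE messages.  Assumes mrf_avail >= 1.
 */
static int
gs_count_urb_writes(int num_outs, int mrf_avail)
{
   int sent = 0, num_msgs = 0;

   while (sent < num_outs) {
      int num_entries = (num_outs - sent + 1) / 2; /* ceil(remaining / 2) */

      if (num_entries > mrf_avail)
         num_entries = mrf_avail;

      /* msg_len for this message would be num_entries + 1 (the header) */
      sent += num_entries * 2;
      num_msgs++;
   }

   return num_msgs;
}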
static void
gs_discard(struct gs_compile_context *gcc)
{
   struct toy_compiler *tc = &gcc->tc;
   struct toy_dst mrf_header;
   struct toy_src desc;

   mrf_header = tdst_d(tdst(TOY_FILE_MRF, gcc->first_free_mrf, 0));
   gs_COPY8(tc, mrf_header, tsrc_from(gcc->vars.urb_write_header));

   desc = tsrc_imm_mdesc_urb(tc,
         true, 1, 0, true, false, false, false, 0, 0);

   tc_add2(tc, TOY_OPCODE_URB_WRITE,
         tdst_null(), tsrc_from(mrf_header), desc);
}
static void
gs_lower_opcode_emit_so_dynamic(struct gs_compile_context *gcc)
{
   struct toy_compiler *tc = &gcc->tc;

   tc_IF(tc, tdst_null(),
         tsrc_from(gcc->dynamic_data.num_vertices_in_prim),
         tsrc_imm_d(gcc->out_vue_min_count),
         GEN6_COND_GE);

   {
      tc_ADD(tc, gcc->vars.tmp,
            tsrc_from(gcc->vars.so_index), tsrc_imm_d(0x03020100));

      /* TODO same as static version */
   }

   tc_ENDIF(tc);

   tc_ADD(tc, gcc->vars.so_index,
         tsrc_from(gcc->vars.so_index), tsrc_imm_d(gcc->out_vue_min_count));
}
static void
cs_dummy(struct cs_compile_context *ccc)
{
   struct toy_compiler *tc = &ccc->tc;
   struct toy_dst header;
   struct toy_src r0, desc;
   struct toy_inst *inst;

   header = tdst_ud(tdst(TOY_FILE_MRF, ccc->first_free_mrf, 0));
   r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0));

   inst = tc_MOV(tc, header, r0);
   inst->exec_size = GEN6_EXECSIZE_8;
   inst->mask_ctrl = GEN6_MASKCTRL_NOMASK;

   desc = tsrc_imm_mdesc(tc, true, 1, 0, true,
         GEN6_MSG_TS_RESOURCE_SELECT_NO_DEREF |
         GEN6_MSG_TS_REQUESTER_TYPE_ROOT |
         GEN6_MSG_TS_OPCODE_DEREF);

   tc_SEND(tc, tdst_null(), tsrc_from(header), desc, GEN6_SFID_SPAWNER);
}
static void
gs_lower_opcode_tgsi_in(struct gs_compile_context *gcc,
                        struct toy_dst dst, int dim, int idx)
{
   struct toy_compiler *tc = &gcc->tc;
   struct toy_src attr;
   int slot, reg = -1, subreg;

   slot = toy_tgsi_find_input(&gcc->tgsi, idx);
   if (slot >= 0) {
      int i;

      for (i = 0; i < gcc->variant->u.gs.num_inputs; i++) {
         if (gcc->variant->u.gs.semantic_names[i] ==
               gcc->tgsi.inputs[slot].semantic_name &&
             gcc->variant->u.gs.semantic_indices[i] ==
               gcc->tgsi.inputs[slot].semantic_index) {
            reg = i / 2;
            subreg = (i % 2) * 4;
            break;
         }
      }
   }

   if (reg < 0) {
      tc_MOV(tc, dst, tsrc_imm_f(0.0f));
      return;
   }

   /* fix vertex ordering for GEN6_3DPRIM_TRISTRIP_REVERSE */
   if (gcc->in_vue_count == 3 && dim < 2) {
      struct toy_inst *inst;

      /* get PrimType */
      inst = tc_AND(tc, tdst_d(gcc->vars.tmp),
            tsrc_offset(gcc->payload.header, 0, 2), tsrc_imm_d(0x1f));
      inst->exec_size = GEN6_EXECSIZE_1;
      inst->src[0] = tsrc_rect(inst->src[0], TOY_RECT_010);
      inst->src[1] = tsrc_rect(inst->src[1], TOY_RECT_010);

      inst = tc_CMP(tc, tdst_null(), tsrc_from(tdst_d(gcc->vars.tmp)),
            tsrc_imm_d(GEN6_3DPRIM_TRISTRIP_REVERSE), GEN6_COND_NZ);
      inst->src[0] = tsrc_rect(inst->src[0], TOY_RECT_010);

      attr = tsrc_offset(gcc->payload.vues[dim], reg, subreg);
      inst = tc_MOV(tc, dst, attr);
      inst->pred_ctrl = GEN6_PREDCTRL_NORMAL;

      /* swap IN[0] and IN[1] for GEN6_3DPRIM_TRISTRIP_REVERSE */
      dim = !dim;

      attr = tsrc_offset(gcc->payload.vues[dim], reg, subreg);
      inst = tc_MOV(tc, dst, attr);
      inst->pred_ctrl = GEN6_PREDCTRL_NORMAL;
      inst->pred_inv = true;
   }
   else {
      attr = tsrc_offset(gcc->payload.vues[dim], reg, subreg);
      tc_MOV(tc, dst, attr);
   }
}
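/*
 * Illustration only: input attributes are packed two per register in the
 * incoming VUE, so attribute i lives at register (i / 2), dword offset
 * ((i % 2) * 4), which is exactly the reg/subreg computed above.
 */
static void
gs_input_attr_location(int attr, int *reg, int *subreg)
{
   *reg = attr / 2;          /* two 4-dword attributes per 8-dword register */
   *subreg = (attr % 2) * 4; /* second attribute starts at dword 4 */
}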
static void
gs_lower_opcode_emit_so_static(struct gs_compile_context *gcc)
{
   struct toy_compiler *tc = &gcc->tc;
   struct toy_inst *inst;
   int i, j;

   if (gcc->static_data.num_vertices_in_prim < gcc->out_vue_min_count)
      return;

   inst = tc_MOV(tc, tdst_w(gcc->vars.tmp), tsrc_imm_v(0x03020100));
   inst->exec_size = GEN6_EXECSIZE_8;
   inst->mask_ctrl = GEN6_MASKCTRL_NOMASK;

   tc_ADD(tc, tdst_d(gcc->vars.tmp), tsrc_from(tdst_d(gcc->vars.tmp)),
         tsrc_rect(tsrc_from(gcc->vars.so_index), TOY_RECT_010));

   tc_IF(tc, tdst_null(),
         tsrc_rect(tsrc_offset(tsrc_from(tdst_d(gcc->vars.tmp)),
               0, gcc->out_vue_min_count - 1), TOY_RECT_010),
         tsrc_rect(tsrc_offset(gcc->payload.svbi, 0, 4), TOY_RECT_010),
         GEN6_COND_LE);
   {
      for (i = 0; i < gcc->out_vue_min_count; i++) {
         for (j = 0; j < gcc->so_info->num_outputs; j++) {
            const int idx = gcc->so_info->output[j].register_index;
            struct toy_src index, out;
            int binding_table_index;
            bool write_commit;

            index = tsrc_d(tsrc_offset(tsrc_from(gcc->vars.tmp), 0, i));

            if (i == gcc->out_vue_min_count - 1) {
               out = gcc->vars.tgsi_outs[idx];
            }
            else {
               /* gcc->vars.buffer_cur also points to the first vertex */
               const int buf =
                  (gcc->vars.buffer_cur + i) % gcc->vars.buffer_needed;

               out = tsrc_offset(tsrc_from(gcc->vars.buffers[buf]), idx, 0);
            }

            out = tsrc_offset(out, 0,
                  gcc->so_info->output[j].start_component);

            /*
             * From the Sandy Bridge PRM, volume 4 part 2, page 19:
             *
             *     "The Kernel must do a write commit on the last write to
             *      DAP prior to a URB_WRITE with End of Thread."
             */
            write_commit =
               (gcc->static_data.num_vertices ==
                   gcc->static_data.total_vertices &&
                i == gcc->out_vue_min_count - 1 &&
                j == gcc->so_info->num_outputs - 1);

            binding_table_index = gcc->shader->bt.gen6_so_base + j;

            gs_write_so(gcc, gcc->vars.tmp, index,
                  out, write_commit, binding_table_index);

            /*
             * From the Sandy Bridge PRM, volume 4 part 1, page 168:
             *
             *     "The write commit does not modify the destination
             *      register, but merely clears the dependency associated
             *      with the destination register. Thus, a simple "mov"
             *      instruction using the register as a source is
             *      sufficient to wait for the write commit to occur."
             */
            if (write_commit)
               tc_MOV(tc, gcc->vars.tmp, tsrc_from(gcc->vars.tmp));
         }
      }

      /* SONumPrimsWritten occupies the higher word of m0.2 of URB_WRITE */
      tc_ADD(tc, gcc->vars.so_written,
            tsrc_from(gcc->vars.so_written), tsrc_imm_d(1 << 16));
      tc_ADD(tc, gcc->vars.so_index,
            tsrc_from(gcc->vars.so_index),
            tsrc_imm_d(gcc->out_vue_min_count));
   }
   tc_ENDIF(tc);
}
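/*
 * Sketch (not driver code) of the vertex selection used above when
 * streaming out a primitive: the last vertex of the primitive is read
 * straight from the live TGSI outputs, while earlier vertices come from a
 * ring of buffered VUEs starting at buffer_cur.
 */
static int
gs_so_vertex_buffer(int vertex, int out_vue_min_count,
                    int buffer_cur, int buffer_needed)
{
   /* -1 means: read from the live TGSI outputs instead of a buffer */
   if (vertex == out_vue_min_count - 1)
      return -1;

   return (buffer_cur + vertex) % buffer_needed;
}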
/**
 * Emit instructions to write the VUE.
 */
static void
vs_write_vue(struct vs_compile_context *vcc)
{
   struct toy_compiler *tc = &vcc->tc;
   struct toy_src outs[PIPE_MAX_SHADER_OUTPUTS];
   struct toy_dst header;
   struct toy_src r0;
   struct toy_inst *inst;
   int sent_attrs, total_attrs;

   header = tdst_ud(tdst(TOY_FILE_MRF, vcc->first_free_mrf, 0));
   r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0));
   inst = tc_MOV(tc, header, r0);
   inst->mask_ctrl = GEN6_MASKCTRL_NOMASK;

   if (ilo_dev_gen(tc->dev) >= ILO_GEN(7)) {
      inst = tc_OR(tc, tdst_offset(header, 0, 5),
            tsrc_rect(tsrc_offset(r0, 0, 5), TOY_RECT_010),
            tsrc_rect(tsrc_imm_ud(0xff00), TOY_RECT_010));
      inst->exec_size = GEN6_EXECSIZE_1;
      inst->access_mode = GEN6_ALIGN_1;
      inst->mask_ctrl = GEN6_MASKCTRL_NOMASK;
   }

   total_attrs = vs_collect_outputs(vcc, outs);
   sent_attrs = 0;
   while (sent_attrs < total_attrs) {
      struct toy_src desc;
      int mrf = vcc->first_free_mrf + 1, avail_mrf_for_attrs;
      int num_attrs, msg_len, i;
      bool eot;

      num_attrs = total_attrs - sent_attrs;
      eot = true;

      /* see if we need another message */
      avail_mrf_for_attrs = vcc->last_free_mrf - mrf + 1;
      if (num_attrs > avail_mrf_for_attrs) {
         /*
          * From the Sandy Bridge PRM, volume 4 part 2, page 22:
          *
          *     "Offset. This field specifies a destination offset (in
          *      256-bit units) from the start of the URB entry(s), as
          *      referenced by URB Return Handle n, at which the data (if
          *      any) will be written."
          *
          * As we need to offset the following messages, we must make sure
          * this one writes an even number of attributes.
          */
         num_attrs = avail_mrf_for_attrs & ~1;
         eot = false;
      }

      if (ilo_dev_gen(tc->dev) >= ILO_GEN(7)) {
         /* do not forget about the header */
         msg_len = 1 + num_attrs;
      }
      else {
         /*
          * From the Sandy Bridge PRM, volume 4 part 2, page 26:
          *
          *     "At least 256 bits per vertex (512 bits total, M1 & M2)
          *      must be written.  Writing only 128 bits per vertex (256
          *      bits total, M1 only) results in UNDEFINED operation."
          *
          *     "[DevSNB] Interleave writes must be in multiples of 256 per
          *      vertex."
          *
          * That is, we must write or appear to write an even number of
          * attributes, starting from two.
          */
         if (num_attrs % 2 && num_attrs == avail_mrf_for_attrs) {
            num_attrs--;
            eot = false;
         }

         msg_len = 1 + align(num_attrs, 2);
      }

      for (i = 0; i < num_attrs; i++)
         tc_MOV(tc, tdst(TOY_FILE_MRF, mrf++, 0), outs[sent_attrs + i]);

      assert(sent_attrs % 2 == 0);
      desc = tsrc_imm_mdesc_urb(tc, eot, msg_len, 0,
            eot, true, false, true, sent_attrs / 2, 0);

      tc_add2(tc, TOY_OPCODE_URB_WRITE,
            tdst_null(), tsrc_from(header), desc);

      sent_attrs += num_attrs;
   }
}
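/*
 * Standalone sketch of the message-splitting rules documented above: the
 * URB offset is in 256-bit (two-attribute) units, so every message except
 * the last must cover an even number of attributes, and on Gen6 the
 * written length is additionally padded to an even count.  This mirrors
 * the loop body of vs_write_vue() for illustration; the even-align is
 * open-coded to stay self-contained.
 */
static int
vs_urb_write_params(int total_attrs, int sent_attrs, int avail_mrf,
                    bool gen7, int *msg_len, bool *eot)
{
   int num_attrs = total_attrs - sent_attrs;

   *eot = true;
   if (num_attrs > avail_mrf) {
      num_attrs = avail_mrf & ~1;   /* keep the URB offset 256-bit aligned */
      *eot = false;
   }

   if (gen7) {
      *msg_len = 1 + num_attrs;     /* header plus attributes */
   }
   else {
      /* Gen6: cannot pad past the free MRFs, so drop the odd attribute */
      if (num_attrs % 2 && num_attrs == avail_mrf) {
         num_attrs--;
         *eot = false;
      }
      *msg_len = 1 + ((num_attrs + 1) & ~1); /* align to an even count */
   }

   return num_attrs;   /* the URB offset then advances by num_attrs / 2 */
}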
/**
 * Emit instructions to write the color buffers (and the depth buffer).
 */
static void
fs_write_fb(struct fs_compile_context *fcc)
{
   struct toy_compiler *tc = &fcc->tc;
   int base_mrf = fcc->first_free_mrf;
   const struct toy_dst header = tdst_ud(tdst(TOY_FILE_MRF, base_mrf, 0));
   bool header_present = false;
   struct toy_src desc;
   unsigned msg_type, ctrl;
   int color_slots[ILO_MAX_DRAW_BUFFERS], num_cbufs;
   int pos_slot = -1, cbuf, i;

   for (i = 0; i < Elements(color_slots); i++)
      color_slots[i] = -1;

   for (i = 0; i < fcc->tgsi.num_outputs; i++) {
      if (fcc->tgsi.outputs[i].semantic_name == TGSI_SEMANTIC_COLOR) {
         assert(fcc->tgsi.outputs[i].semantic_index < Elements(color_slots));
         color_slots[fcc->tgsi.outputs[i].semantic_index] = i;
      }
      else if (fcc->tgsi.outputs[i].semantic_name == TGSI_SEMANTIC_POSITION) {
         pos_slot = i;
      }
   }

   num_cbufs = fcc->variant->u.fs.num_cbufs;
   /* still need to send EOT (and probably depth) */
   if (!num_cbufs)
      num_cbufs = 1;

   /* we need the header to specify the pixel mask or render target */
   if (fcc->tgsi.uses_kill || num_cbufs > 1) {
      const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0));
      struct toy_inst *inst;

      inst = tc_MOV(tc, header, r0);
      inst->mask_ctrl = BRW_MASK_DISABLE;
      base_mrf += fcc->num_grf_per_vrf;

      /* this is a two-register header */
      if (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) {
         inst = tc_MOV(tc, tdst_offset(header, 1, 0), tsrc_offset(r0, 1, 0));
         inst->mask_ctrl = BRW_MASK_DISABLE;
         base_mrf += fcc->num_grf_per_vrf;
      }

      header_present = true;
   }

   for (cbuf = 0; cbuf < num_cbufs; cbuf++) {
      const int slot =
         color_slots[(fcc->tgsi.props.fs_color0_writes_all_cbufs) ? 0 : cbuf];
      int mrf = base_mrf, vrf;
      struct toy_src src[4];

      if (slot >= 0) {
         const unsigned undefined_mask =
            fcc->tgsi.outputs[slot].undefined_mask;
         const int index = fcc->tgsi.outputs[slot].index;

         vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index);
         if (vrf >= 0) {
            const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0);
            tsrc_transpose(tmp, src);
         }
         else {
            /* use (0, 0, 0, 0) */
            tsrc_transpose(tsrc_imm_f(0.0f), src);
         }

         for (i = 0; i < 4; i++) {
            const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);

            if (undefined_mask & (1 << i))
               src[i] = tsrc_imm_f(0.0f);

            tc_MOV(tc, dst, src[i]);
            mrf += fcc->num_grf_per_vrf;
         }
      }
      else {
         /* use (0, 0, 0, 0) */
         for (i = 0; i < 4; i++) {
            const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);

            tc_MOV(tc, dst, tsrc_imm_f(0.0f));
            mrf += fcc->num_grf_per_vrf;
         }
      }

      /* select BLEND_STATE[rt] */
      if (cbuf > 0) {
         struct toy_inst *inst;

         inst = tc_MOV(tc, tdst_offset(header, 0, 2), tsrc_imm_ud(cbuf));
         inst->mask_ctrl = BRW_MASK_DISABLE;
         inst->exec_size = BRW_EXECUTE_1;
         inst->src[0].rect = TOY_RECT_010;
      }

      if (cbuf == 0 && pos_slot >= 0) {
         const int index = fcc->tgsi.outputs[pos_slot].index;
         const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);
         struct toy_src src[4];
         int vrf;

         vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index);
         if (vrf >= 0) {
            const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0);
            tsrc_transpose(tmp, src);
         }
         else {
            /* use (0, 0, 0, 0) */
            tsrc_transpose(tsrc_imm_f(0.0f), src);
         }

         /* only Z */
         tc_MOV(tc, dst, src[2]);
         mrf += fcc->num_grf_per_vrf;
      }

      msg_type = (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE) ?
         BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE :
         BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;

      ctrl = (cbuf == num_cbufs - 1) << 12 |
             msg_type << 8;

      desc = tsrc_imm_mdesc_data_port(tc, cbuf == num_cbufs - 1,
            mrf - fcc->first_free_mrf, 0,
            header_present, false,
            GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE,
            ctrl, ILO_WM_DRAW_SURFACE(cbuf));

      tc_add2(tc, TOY_OPCODE_FB_WRITE, tdst_null(),
            tsrc(TOY_FILE_MRF, fcc->first_free_mrf, 0), desc);
   }
}
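/*
 * Illustration only: the message-specific control bits assembled above for
 * the render-target write.  As used in fs_write_fb(), bit 12 carries the
 * last-render-target flag and bits 11:8 carry the message type (SIMD16
 * single source vs. SIMD8 single source, subspans 0 and 1).
 */
static unsigned
fb_write_ctrl(bool last_rt, unsigned msg_type)
{
   return (unsigned) last_rt << 12 | msg_type << 8;
}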