static void gs_setup_vars(struct gs_compile_context *gcc) { int grf = gcc->first_free_grf; int i; gcc->vars.urb_write_header = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); grf++; gcc->vars.tmp = tdst(TOY_FILE_GRF, grf, 0); grf++; if (gcc->write_so) { gcc->vars.buffer_needed = gcc->out_vue_min_count - 1; for (i = 0; i < gcc->vars.buffer_needed; i++) { gcc->vars.buffers[i] = tdst(TOY_FILE_GRF, grf, 0); grf += gcc->shader->out.count; } gcc->vars.so_written = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); grf++; gcc->vars.so_index = tdst_d(tdst(TOY_FILE_GRF, grf, 0)); grf++; } gcc->first_free_grf = grf; if (!gcc->tgsi.reg_mapping) { for (i = 0; i < gcc->shader->out.count; i++) gcc->vars.tgsi_outs[i] = tsrc(TOY_FILE_GRF, grf++, 0); gcc->first_free_grf = grf; return; } for (i = 0; i < gcc->shader->out.count; i++) { const int slot = gcc->output_map[i]; const int vrf = (slot >= 0) ? toy_tgsi_get_vrf(&gcc->tgsi, TGSI_FILE_OUTPUT, 0, gcc->tgsi.outputs[slot].index) : -1; if (vrf >= 0) gcc->vars.tgsi_outs[i] = tsrc(TOY_FILE_VRF, vrf, 0); else gcc->vars.tgsi_outs[i] = (i == 0) ? tsrc_imm_d(0) : tsrc_imm_f(0.0f); } }
static void gs_lower_opcode_tgsi_direct(struct gs_compile_context *gcc, struct toy_inst *inst) { struct toy_compiler *tc = &gcc->tc; int dim, idx; assert(inst->src[0].file == TOY_FILE_IMM); dim = inst->src[0].val32; assert(inst->src[1].file == TOY_FILE_IMM); idx = inst->src[1].val32; switch (inst->opcode) { case TOY_OPCODE_TGSI_IN: gs_lower_opcode_tgsi_in(gcc, inst->dst, dim, idx); /* fetch all dimensions */ if (dim == 0) { int i; for (i = 1; i < gcc->in_vue_count; i++) { const int vrf = toy_tgsi_get_vrf(&gcc->tgsi, TGSI_FILE_INPUT, i, idx); struct toy_dst dst; if (vrf < 0) continue; dst = tdst(TOY_FILE_VRF, vrf, 0); gs_lower_opcode_tgsi_in(gcc, dst, i, idx); } } break; case TOY_OPCODE_TGSI_IMM: assert(!dim); gs_lower_opcode_tgsi_imm(gcc, inst->dst, idx); break; case TOY_OPCODE_TGSI_CONST: case TOY_OPCODE_TGSI_SV: default: tc_fail(tc, "unhandled TGSI fetch"); break; } tc_discard_inst(tc, inst); }
/**
 * Collect the toy registers to be written to the VUE.
 *
 * For each hardware output slot, pick the source to write: the VRF that
 * holds the matching TGSI output when one exists, a freshly computed
 * clip-distance vector when the hardware wants CLIPDIST but the shader
 * did not write it, or an immediate zero otherwise.
 *
 * Returns the number of entries written to \p outs.
 */
static int
vs_collect_outputs(struct vs_compile_context *vcc, struct toy_src *outs)
{
   const struct toy_tgsi *tgsi = &vcc->tgsi;
   unsigned i;

   for (i = 0; i < vcc->shader->out.count; i++) {
      const int slot = vcc->output_map[i];
      const int vrf = (slot >= 0) ? toy_tgsi_get_vrf(tgsi,
            TGSI_FILE_OUTPUT, 0, tgsi->outputs[slot].index) : -1;
      struct toy_src src;

      if (vrf >= 0) {
         struct toy_dst dst;

         dst = tdst(TOY_FILE_VRF, vrf, 0);
         src = tsrc_from(dst);

         if (i == 0) {
            /* PSIZE is at channel W */
            tc_MOV(&vcc->tc, tdst_writemask(dst, TOY_WRITEMASK_W),
                  tsrc_swizzle1(src, TOY_SWIZZLE_X));

            /* the other channels are for the header */
            dst = tdst_d(dst);
            tc_MOV(&vcc->tc, tdst_writemask(dst, TOY_WRITEMASK_XYZ),
                  tsrc_imm_d(0));
         }
         else {
            /* initialize unused channels to 0.0f */
            if (tgsi->outputs[slot].undefined_mask) {
               dst = tdst_writemask(dst, tgsi->outputs[slot].undefined_mask);
               tc_MOV(&vcc->tc, dst, tsrc_imm_f(0.0f));
            }
         }
      }
      else {
         /* XXX this is too ugly */
         if (vcc->shader->out.semantic_names[i] == TGSI_SEMANTIC_CLIPDIST &&
             slot < 0) {
            /* ok, we need to compute clip distance */
            int clipvert_slot = -1, clipvert_vrf, j;

            /* prefer CLIPVERTEX; fall back to POSITION */
            for (j = 0; j < tgsi->num_outputs; j++) {
               if (tgsi->outputs[j].semantic_name ==
                     TGSI_SEMANTIC_CLIPVERTEX) {
                  clipvert_slot = j;
                  break;
               }
               else if (tgsi->outputs[j].semantic_name ==
                     TGSI_SEMANTIC_POSITION) {
                  /* remember pos, but keep looking */
                  clipvert_slot = j;
               }
            }

            clipvert_vrf = (clipvert_slot >= 0) ? toy_tgsi_get_vrf(tgsi,
                  TGSI_FILE_OUTPUT, 0,
                  tgsi->outputs[clipvert_slot].index) : -1;

            if (clipvert_vrf >= 0) {
               struct toy_dst tmp = tc_alloc_tmp(&vcc->tc);
               struct toy_src clipvert =
                  tsrc(TOY_FILE_VRF, clipvert_vrf, 0);
               int first_ucp, last_ucp;

               /* semantic index selects clip distances 0-3 or 4-7 */
               if (vcc->shader->out.semantic_indices[i]) {
                  first_ucp = 4;
                  last_ucp = MIN2(7, vcc->variant->u.vs.num_ucps - 1);
               }
               else {
                  first_ucp = 0;
                  last_ucp = MIN2(3, vcc->variant->u.vs.num_ucps - 1);
               }

               for (j = first_ucp; j <= last_ucp; j++) {
                  /* two user clip planes are packed into each GRF,
                   * 16 bytes apart */
                  const int plane_grf = vcc->first_ucp_grf + j / 2;
                  const int plane_subreg = (j & 1) * 16;
                  const struct toy_src plane = tsrc_rect(tsrc(TOY_FILE_GRF,
                        plane_grf, plane_subreg), TOY_RECT_041);
                  const unsigned writemask = 1 << ((j >= 4) ? j - 4 : j);

                  /* distance = dot(clip vertex, user clip plane) */
                  tc_DP4(&vcc->tc, tdst_writemask(tmp, writemask),
                        clipvert, plane);
               }

               src = tsrc_from(tmp);
            }
            else {
               src = tsrc_imm_f(0.0f);
            }
         }
         else {
            /* slot 0 is the integer header; the rest are floats */
            src = (i == 0) ? tsrc_imm_d(0) : tsrc_imm_f(0.0f);
         }
      }

      outs[i] = src;
   }

   return i;
}
/**
 * Emit instructions to write the color buffers (and the depth buffer).
 *
 * For each enabled color buffer, the four color channels (and, for cbuf 0,
 * the source depth when the shader writes POSITION) are copied into
 * consecutive MRFs and sent with a render-target-write message; the last
 * message carries EOT.
 */
static void
fs_write_fb(struct fs_compile_context *fcc)
{
   struct toy_compiler *tc = &fcc->tc;
   int base_mrf = fcc->first_free_mrf;
   const struct toy_dst header = tdst_ud(tdst(TOY_FILE_MRF, base_mrf, 0));
   bool header_present = false;
   struct toy_src desc;
   unsigned msg_type, ctrl;
   int color_slots[ILO_MAX_DRAW_BUFFERS], num_cbufs;
   int pos_slot = -1, cbuf, i;

   /* map COLOR semantic indices to TGSI output slots; -1 means unwritten */
   for (i = 0; i < Elements(color_slots); i++)
      color_slots[i] = -1;

   for (i = 0; i < fcc->tgsi.num_outputs; i++) {
      if (fcc->tgsi.outputs[i].semantic_name == TGSI_SEMANTIC_COLOR) {
         assert(fcc->tgsi.outputs[i].semantic_index <
               Elements(color_slots));
         color_slots[fcc->tgsi.outputs[i].semantic_index] = i;
      }
      else if (fcc->tgsi.outputs[i].semantic_name ==
            TGSI_SEMANTIC_POSITION) {
         pos_slot = i;
      }
   }

   num_cbufs = fcc->variant->u.fs.num_cbufs;
   /* still need to send EOT (and probably depth) */
   if (!num_cbufs)
      num_cbufs = 1;

   /* we need the header to specify the pixel mask or render target */
   if (fcc->tgsi.uses_kill || num_cbufs > 1) {
      const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0));
      struct toy_inst *inst;

      inst = tc_MOV(tc, header, r0);
      inst->mask_ctrl = BRW_MASK_DISABLE;
      base_mrf += fcc->num_grf_per_vrf;

      /* this is a two-register header */
      if (fcc->dispatch_mode == GEN6_WM_8_DISPATCH_ENABLE) {
         inst = tc_MOV(tc, tdst_offset(header, 1, 0),
               tsrc_offset(r0, 1, 0));
         inst->mask_ctrl = BRW_MASK_DISABLE;
         base_mrf += fcc->num_grf_per_vrf;
      }

      header_present = true;
   }

   for (cbuf = 0; cbuf < num_cbufs; cbuf++) {
      /* when color0 writes all cbufs, slot 0's color feeds every buffer */
      const int slot =
         color_slots[(fcc->tgsi.props.fs_color0_writes_all_cbufs) ?
            0 : cbuf];
      int mrf = base_mrf, vrf;
      struct toy_src src[4];

      if (slot >= 0) {
         const unsigned undefined_mask =
            fcc->tgsi.outputs[slot].undefined_mask;
         const int index = fcc->tgsi.outputs[slot].index;

         vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index);
         if (vrf >= 0) {
            const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0);
            tsrc_transpose(tmp, src);
         }
         else {
            /* use (0, 0, 0, 0) */
            tsrc_transpose(tsrc_imm_f(0.0f), src);
         }

         /* copy the four channels into consecutive MRFs */
         for (i = 0; i < 4; i++) {
            const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);

            if (undefined_mask & (1 << i))
               src[i] = tsrc_imm_f(0.0f);

            tc_MOV(tc, dst, src[i]);
            mrf += fcc->num_grf_per_vrf;
         }
      }
      else {
         /* use (0, 0, 0, 0) */
         for (i = 0; i < 4; i++) {
            const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);

            tc_MOV(tc, dst, tsrc_imm_f(0.0f));
            mrf += fcc->num_grf_per_vrf;
         }
      }

      /* select BLEND_STATE[rt] */
      if (cbuf > 0) {
         struct toy_inst *inst;

         inst = tc_MOV(tc, tdst_offset(header, 0, 2), tsrc_imm_ud(cbuf));
         inst->mask_ctrl = BRW_MASK_DISABLE;
         inst->exec_size = BRW_EXECUTE_1;
         inst->src[0].rect = TOY_RECT_010;
      }

      /* append source depth after the colors, for the first message only */
      if (cbuf == 0 && pos_slot >= 0) {
         const int index = fcc->tgsi.outputs[pos_slot].index;
         const struct toy_dst dst = tdst(TOY_FILE_MRF, mrf, 0);
         struct toy_src src[4];
         int vrf;

         vrf = toy_tgsi_get_vrf(&fcc->tgsi, TGSI_FILE_OUTPUT, 0, index);
         if (vrf >= 0) {
            const struct toy_src tmp = tsrc(TOY_FILE_VRF, vrf, 0);
            tsrc_transpose(tmp, src);
         }
         else {
            /* use (0, 0, 0, 0) */
            tsrc_transpose(tsrc_imm_f(0.0f), src);
         }

         /* only Z */
         tc_MOV(tc, dst, src[2]);
         mrf += fcc->num_grf_per_vrf;
      }

      msg_type = (fcc->dispatch_mode == GEN6_WM_16_DISPATCH_ENABLE) ?
         BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE :
         BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;

      /* "last render target" bit (12) plus the message type (8) */
      ctrl = (cbuf == num_cbufs - 1) << 12 |
             msg_type << 8;

      desc = tsrc_imm_mdesc_data_port(tc, cbuf == num_cbufs - 1,
            mrf - fcc->first_free_mrf, 0,
            header_present, false,
            GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE,
            ctrl, ILO_WM_DRAW_SURFACE(cbuf));

      tc_add2(tc, TOY_OPCODE_FB_WRITE, tdst_null(),
            tsrc(TOY_FILE_MRF, fcc->first_free_mrf, 0), desc);
   }
}