/**
 * Assign the GS thread payload layout: fixed header, optional SVBI,
 * then one input VUE per incoming vertex.  Records where the free GRF
 * space begins afterwards.
 */
static void
gs_setup_payload(struct gs_compile_context *gcc)
{
   int reg = 0;
   int i;

   /* r0: payload header */
   gcc->payload.header = tsrc_d(tsrc(TOY_FILE_GRF, reg, 0));
   reg++;

   /* r1: SVBI, delivered only when stream output is enabled */
   if (gcc->write_so) {
      gcc->payload.svbi = tsrc_ud(tsrc(TOY_FILE_GRF, reg, 0));
      reg++;
   }

   /* URB data starts here; no pull constants are pushed */
   gcc->shader->in.start_grf = reg;

   /* input VUEs, each occupying in_vue_size registers */
   for (i = 0; i < gcc->in_vue_count; i++) {
      gcc->payload.vues[i] = tsrc(TOY_FILE_GRF, reg, 0);
      reg += gcc->in_vue_size;
   }

   gcc->first_free_grf = reg;
   gcc->last_free_grf = 127;
}
static void fs_lower_opcode_tgsi_const_gen6(struct fs_compile_context *fcc, struct toy_dst dst, int dim, struct toy_src idx) { const struct toy_dst header = tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 0)); const struct toy_dst global_offset = tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 2 * 4)); const struct toy_src r0 = tsrc_ud(tsrc(TOY_FILE_GRF, 0, 0)); struct toy_compiler *tc = &fcc->tc; unsigned msg_type, msg_ctrl, msg_len; struct toy_inst *inst; struct toy_src desc; struct toy_dst tmp, real_dst[4]; int i; /* set message header */ inst = tc_MOV(tc, header, r0); inst->mask_ctrl = BRW_MASK_DISABLE; /* set global offset */ inst = tc_MOV(tc, global_offset, idx); inst->mask_ctrl = BRW_MASK_DISABLE; inst->exec_size = BRW_EXECUTE_1; inst->src[0].rect = TOY_RECT_010; msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ; msg_ctrl = BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW << 8; msg_len = 1; desc = tsrc_imm_mdesc_data_port(tc, false, msg_len, 1, true, false, msg_type, msg_ctrl, ILO_WM_CONST_SURFACE(dim)); tmp = tc_alloc_tmp(tc); tc_SEND(tc, tmp, tsrc_from(header), desc, fcc->const_cache); tdst_transpose(dst, real_dst); for (i = 0; i < 4; i++) { const struct toy_src src = tsrc_offset(tsrc_rect(tsrc_from(tmp), TOY_RECT_010), 0, i); /* cast to type D to make sure these are raw moves */ tc_MOV(tc, tdst_d(real_dst[i]), tsrc_d(src)); } }
static void fs_lower_opcode_tgsi_const_gen7(struct fs_compile_context *fcc, struct toy_dst dst, int dim, struct toy_src idx) { struct toy_compiler *tc = &fcc->tc; const struct toy_dst offset = tdst_ud(tdst(TOY_FILE_MRF, fcc->first_free_mrf, 0)); struct toy_src desc; struct toy_inst *inst; struct toy_dst tmp, real_dst[4]; int i; /* * In 4c1fdae0a01b3f92ec03b61aac1d3df500d51fc6, pull constant load was * changed from OWord Block Read to ld to increase performance in the * classic driver. Since we use the constant cache instead of the data * cache, I wonder if we still want to follow the classic driver. */ /* set offset */ inst = tc_MOV(tc, offset, tsrc_rect(idx, TOY_RECT_010)); inst->exec_size = BRW_EXECUTE_8; inst->mask_ctrl = BRW_MASK_DISABLE; desc = tsrc_imm_mdesc_sampler(tc, 1, 1, false, BRW_SAMPLER_SIMD_MODE_SIMD4X2, GEN5_SAMPLER_MESSAGE_SAMPLE_LD, 0, ILO_WM_CONST_SURFACE(dim)); tmp = tc_alloc_tmp(tc); inst = tc_SEND(tc, tmp, tsrc_from(offset), desc, BRW_SFID_SAMPLER); inst->exec_size = BRW_EXECUTE_8; inst->mask_ctrl = BRW_MASK_DISABLE; tdst_transpose(dst, real_dst); for (i = 0; i < 4; i++) { const struct toy_src src = tsrc_offset(tsrc_rect(tsrc_from(tmp), TOY_RECT_010), 0, i); /* cast to type D to make sure these are raw moves */ tc_MOV(tc, tdst_d(real_dst[i]), tsrc_d(src)); } }
static void vs_lower_opcode_tgsi_sv(struct vs_compile_context *vcc, struct toy_dst dst, int dim, int idx) { struct toy_compiler *tc = &vcc->tc; const struct toy_tgsi *tgsi = &vcc->tgsi; int slot; assert(!dim); slot = toy_tgsi_find_system_value(tgsi, idx); if (slot < 0) return; switch (tgsi->system_values[slot].semantic_name) { case TGSI_SEMANTIC_INSTANCEID: case TGSI_SEMANTIC_VERTEXID: /* * In 3DSTATE_VERTEX_ELEMENTS, we prepend an extra vertex element for * the generated IDs, with VID in the X channel and IID in the Y * channel. */ { const int grf = vcc->first_vue_grf; const struct toy_src src = tsrc(TOY_FILE_GRF, grf, 0); const enum toy_swizzle swizzle = (tgsi->system_values[slot].semantic_name == TGSI_SEMANTIC_INSTANCEID) ? TOY_SWIZZLE_Y : TOY_SWIZZLE_X; tc_MOV(tc, tdst_d(dst), tsrc_d(tsrc_swizzle1(src, swizzle))); } break; case TGSI_SEMANTIC_PRIMID: default: tc_fail(tc, "unhandled system value"); tc_MOV(tc, dst, tsrc_imm_d(0)); break; } }
static void fetch_face(struct fs_compile_context *fcc, struct toy_dst dst) { struct toy_compiler *tc = &fcc->tc; const struct toy_src r0 = tsrc_d(tsrc(TOY_FILE_GRF, 0, 0)); struct toy_dst tmp_f, tmp; struct toy_dst real_dst[4]; tdst_transpose(dst, real_dst); tmp_f = tc_alloc_tmp(tc); tmp = tdst_d(tmp_f); tc_SHR(tc, tmp, tsrc_rect(r0, TOY_RECT_010), tsrc_imm_d(15)); tc_AND(tc, tmp, tsrc_from(tmp), tsrc_imm_d(1)); tc_MOV(tc, tmp_f, tsrc_from(tmp)); /* convert to 1.0 and -1.0 */ tc_MUL(tc, tmp_f, tsrc_from(tmp_f), tsrc_imm_f(-2.0f)); tc_ADD(tc, real_dst[0], tsrc_from(tmp_f), tsrc_imm_f(1.0f)); tc_MOV(tc, real_dst[1], tsrc_imm_f(0.0f)); tc_MOV(tc, real_dst[2], tsrc_imm_f(0.0f)); tc_MOV(tc, real_dst[3], tsrc_imm_f(1.0f)); }
/**
 * Emit the stream-output writes for the primitive that was just completed
 * by a static EMIT.  For each of the out_vue_min_count vertices making up
 * the primitive, every SO output declared in so_info is written with
 * gs_write_so(), guarded by an IF that checks the SO write indices against
 * SVBI so nothing is written past the buffer end.  The emission order here
 * (header setup, IF, writes, counter updates, ENDIF) is mandated by the
 * hardware message sequence — do not reorder.
 */
static void
gs_lower_opcode_emit_so_static(struct gs_compile_context *gcc)
{
   struct toy_compiler *tc = &gcc->tc;
   struct toy_inst *inst;
   int i, j;

   /* not enough vertices accumulated yet to form a primitive */
   if (gcc->static_data.num_vertices_in_prim < gcc->out_vue_min_count)
      return;

   /*
    * Load the per-element vector immediate (0, 1, 2, 3) and add the
    * current SO index to it, producing the SO write index for each of the
    * primitive's vertices in tmp.
    */
   inst = tc_MOV(tc, tdst_w(gcc->vars.tmp), tsrc_imm_v(0x03020100));
   inst->exec_size = GEN6_EXECSIZE_8;
   inst->mask_ctrl = GEN6_MASKCTRL_NOMASK;

   tc_ADD(tc, tdst_d(gcc->vars.tmp), tsrc_from(tdst_d(gcc->vars.tmp)),
         tsrc_rect(tsrc_from(gcc->vars.so_index), TOY_RECT_010));

   /*
    * Only write when the last vertex's SO index is still within bounds;
    * the bound is taken from payload.svbi at offset 4 (NOTE(review):
    * presumably the maximum SVBI — confirm against the payload layout).
    */
   tc_IF(tc, tdst_null(),
         tsrc_rect(tsrc_offset(tsrc_from(tdst_d(gcc->vars.tmp)), 0,
               gcc->out_vue_min_count - 1), TOY_RECT_010),
         tsrc_rect(tsrc_offset(gcc->payload.svbi, 0, 4), TOY_RECT_010),
         GEN6_COND_LE);
   {
      for (i = 0; i < gcc->out_vue_min_count; i++) {
         for (j = 0; j < gcc->so_info->num_outputs; j++) {
            const int idx = gcc->so_info->output[j].register_index;
            struct toy_src index, out;
            int binding_table_index;
            bool write_commit;

            /* SO write index for vertex i of this primitive */
            index = tsrc_d(tsrc_offset(tsrc_from(gcc->vars.tmp), 0, i));

            if (i == gcc->out_vue_min_count - 1) {
               /* the newest vertex is still in the live TGSI outputs */
               out = gcc->vars.tgsi_outs[idx];
            }
            else {
               /* gcc->vars.buffer_cur also points to the first vertex */
               const int buf =
                  (gcc->vars.buffer_cur + i) % gcc->vars.buffer_needed;

               out = tsrc_offset(tsrc_from(gcc->vars.buffers[buf]), idx, 0);
            }

            out = tsrc_offset(out, 0,
                  gcc->so_info->output[j].start_component);

            /*
             * From the Sandy Bridge PRM, volume 4 part 2, page 19:
             *
             *   "The Kernel must do a write commit on the last write to DAP
             *    prior to a URB_WRITE with End of Thread."
             */
            write_commit =
               (gcc->static_data.num_vertices ==
                   gcc->static_data.total_vertices &&
                i == gcc->out_vue_min_count - 1 &&
                j == gcc->so_info->num_outputs - 1);

            binding_table_index = gcc->shader->bt.gen6_so_base + j;

            gs_write_so(gcc, gcc->vars.tmp, index, out,
                  write_commit, binding_table_index);

            /*
             * From the Sandy Bridge PRM, volume 4 part 1, page 168:
             *
             *   "The write commit does not modify the destination register,
             *    but merely clears the dependency associated with the
             *    destination register.  Thus, a simple "mov" instruction
             *    using the register as a source is sufficient to wait for
             *    the write commit to occur."
             */
            if (write_commit)
               tc_MOV(tc, gcc->vars.tmp, tsrc_from(gcc->vars.tmp));
         }
      }

      /* SONumPrimsWritten occupies the higher word of m0.2 of URB_WRITE */
      tc_ADD(tc, gcc->vars.so_written,
            tsrc_from(gcc->vars.so_written), tsrc_imm_d(1 << 16));
      /* advance the SO index past the vertices just written */
      tc_ADD(tc, gcc->vars.so_index,
            tsrc_from(gcc->vars.so_index),
            tsrc_imm_d(gcc->out_vue_min_count));
   }
   tc_ENDIF(tc);
}