void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;
        /* Map from the QIR ops enum order to QPU unpack bits. */
        static const uint32_t unpack_map[] = {
                QPU_UNPACK_8A,
                QPU_UNPACK_8B,
                QPU_UNPACK_8C,
                QPU_UNPACK_8D,
                QPU_UNPACK_16A_TO_F32,
                QPU_UNPACK_16B_TO_F32,
        };

        list_inithead(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);

                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name}
#define M(name) [QOP_##name] = {QPU_M_##name}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        A(ADD),
                        A(SUB),
                        A(SHL),
                        A(SHR),
                        A(ASR),
                        A(MIN),
                        A(MAX),
                        A(AND),
                        A(OR),
                        A(XOR),
                        A(NOT),

                        M(FMUL),
                        M(MUL24),
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr =
                                        qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
                        case QFILE_VPM:
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }
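                /* Most QOPs map 1:1 onto a QPU add or mul ALU op through the
                 * translate[] table in the default case below; the explicit
                 * cases handle ops that additionally need conditional
                 * execution, pack/unpack bits, signals, or fixed-function
                 * register writes.
                 */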
                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                case QOP_SEL_X_0_CS:
                case QOP_SEL_X_0_CC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^ 1) +
                                          QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                case QOP_SEL_X_Y_CS:
                case QOP_SEL_X_Y_CC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^ 1) +
                                          QPU_COND_ZS);
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

                case QOP_PACK_8888_F:
                        queue(c, qpu_m_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8888,
                                                       QPU_PACK);
                        break;

                case QOP_PACK_8A_F:
                case QOP_PACK_8B_F:
                case QOP_PACK_8C_F:
                case QOP_PACK_8D_F:
                        queue(c,
                              qpu_m_MOV(dst, src[0]) |
                              QPU_PM |
                              QPU_SET_FIELD(QPU_PACK_MUL_8A +
                                            qinst->op - QOP_PACK_8A_F,
                                            QPU_PACK));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;
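                /* The TLB cases below interact with discards:
                 * QOP_TLB_DISCARD_SETUP raises the condition flags (QPU_SF),
                 * and the subsequent Z/color writes are then made conditional
                 * on those flags so that discarded pixels don't update the
                 * tile buffer.
                 */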
                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP),
                                           src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_DIRECT:
                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S),
                                           src[0], src[1]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_UNPACK_8A_F:
                case QOP_UNPACK_8B_F:
                case QOP_UNPACK_8C_F:
                case QOP_UNPACK_8D_F:
                case QOP_UNPACK_16A_F:
                case QOP_UNPACK_16B_F: {
                        if (src[0].mux == QPU_MUX_R4) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                               (qinst->op -
                                                                QOP_UNPACK_8A_F),
                                                               QPU_UNPACK);
                        } else {
                                assert(src[0].mux == QPU_MUX_A);

                                /* Since we're setting the pack bits, if the
                                 * destination is in A it would get re-packed.
                                 */
                                queue(c, qpu_a_FMAX((dst.mux == QPU_MUX_A ?
                                                     qpu_rb(31) : dst),
                                                    src[0], src[0]));
                                *last_inst(c) |=
                                        QPU_SET_FIELD(unpack_map[qinst->op -
                                                                 QOP_UNPACK_8A_F],
                                                      QPU_UNPACK);

                                if (dst.mux == QPU_MUX_A) {
                                        queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                                }
                        }
                }
                        break;
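                /* Unlike the float unpacks above, the integer unpacks have no
                 * r4/PM path here: the source is required to be in regfile A
                 * (note the assert), and the value goes through A's unpack
                 * unit via a plain MOV rather than a float ALU op.
                 */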
                case QOP_UNPACK_8A_I:
                case QOP_UNPACK_8B_I:
                case QOP_UNPACK_8C_I:
                case QOP_UNPACK_8D_I:
                case QOP_UNPACK_16A_I:
                case QOP_UNPACK_16B_I: {
                        assert(src[0].mux == QPU_MUX_A);

                        /* Since we're setting the pack bits, if the
                         * destination is in A it would get re-packed.
                         */
                        queue(c, qpu_a_MOV((dst.mux == QPU_MUX_A ?
                                            qpu_rb(31) : dst),
                                           src[0]));
                        *last_inst(c) |=
                                QPU_SET_FIELD(unpack_map[qinst->op -
                                                         QOP_UNPACK_8A_I],
                                              QPU_UNPACK);

                        if (dst.mux == QPU_MUX_A) {
                                queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                        }
                }
                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);

                        if (qir_is_mul(qinst)) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                                if (qinst->dst.pack) {
                                        *last_inst(c) |= QPU_PM;
                                        *last_inst(c) |=
                                                QPU_SET_FIELD(qinst->dst.pack,
                                                              QPU_PACK);
                                }
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                                if (qinst->dst.pack) {
                                        assert(dst.mux == QPU_MUX_A);
                                        *last_inst(c) |=
                                                QPU_SET_FIELD(qinst->dst.pack,
                                                              QPU_PACK);
                                }
                        }
                        break;
                }

                if (qinst->sf) {
                        assert(!qir_is_multi_instruction(qinst));
                        *last_inst(c) |= QPU_SF;
                }
        }

        qpu_schedule_instructions(c);

        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}
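/* Note on the epilogue emitted above: the last real instruction gets
 * QPU_SIG_PROG_END, two NOP delay slots are appended after it, and for
 * fragment shaders the final NOP additionally carries
 * QPU_SIG_SCOREBOARD_UNLOCK.  This layout is what makes the program satisfy
 * the thread-end restrictions checked by vc4_qpu_validate() below.
 */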
/**
 * Checks for the instruction restrictions from page 37 ("Summary of
 * Instruction Restrictions").
 */
void
vc4_qpu_validate(uint64_t *insts, uint32_t num_inst)
{
        bool scoreboard_locked = false;

        /* We don't want to do validation in release builds, but we want to
         * keep compiling the validation code to make sure it doesn't get
         * broken.
         */
#ifndef DEBUG
        return;
#endif

        for (int i = 0; i < num_inst; i++) {
                uint64_t inst = insts[i];

                if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_PROG_END) {
                        if (qpu_inst_is_tlb(inst))
                                scoreboard_locked = true;

                        continue;
                }

                /* "The Thread End instruction must not write to either
                 *  physical regfile A or B."
                 */
                if (QPU_GET_FIELD(inst, QPU_WADDR_ADD) < 32 ||
                    QPU_GET_FIELD(inst, QPU_WADDR_MUL) < 32) {
                        fail_instr(inst, "write to phys reg in thread end");
                }

                /* Can't trigger an implicit wait on the scoreboard in the
                 * program end instruction.
                 */
                if (qpu_inst_is_tlb(inst) && !scoreboard_locked)
                        fail_instr(inst, "implicit sb wait in program end");

                /* Two delay slots will be executed. */
                assert(i + 2 < num_inst);

                for (int j = i; j <= i + 2; j++) {
                        /* "The last three instructions of any program
                         *  (Thread End plus the following two delay-slot
                         *  instructions) must not do varyings read, uniforms
                         *  read or any kind of VPM, VDR, or VDW read or
                         *  write."
                         */
                        if (writes_reg(insts[j], QPU_W_VPM) ||
                            reads_reg(insts[j], QPU_R_VARY) ||
                            reads_reg(insts[j], QPU_R_UNIF) ||
                            reads_reg(insts[j], QPU_R_VPM)) {
                                fail_instr(insts[j], "last 3 instructions "
                                           "using fixed functions");
                        }

                        /* "The Thread End instruction and the following two
                         *  delay slot instructions must not write or read
                         *  address 14 in either regfile A or B."
                         */
                        if (writes_reg(insts[j], 14) ||
                            reads_reg(insts[j], 14)) {
                                fail_instr(insts[j], "last 3 instructions "
                                           "must not use r14");
                        }
                }

                /* "The final program instruction (the second delay slot
                 *  instruction) must not do a TLB Z write."
                 */
                if (writes_reg(insts[i + 2], QPU_W_TLB_Z)) {
                        fail_instr(insts[i + 2], "final instruction doing "
                                   "Z write");
                }
        }

        /* "A scoreboard wait must not occur in the first two instructions of
         *  a fragment shader. This is either the explicit Wait for Scoreboard
         *  signal or an implicit wait with the first tile-buffer read or
         *  write instruction."
         */
        for (int i = 0; i < 2; i++) {
                uint64_t inst = insts[i];

                if (qpu_inst_is_tlb(inst))
                        fail_instr(inst, "sb wait in first two insts");
        }

        /* "If TMU_NOSWAP is written, the write must be three instructions
         *  before the first TMU write instruction. For example, if
         *  TMU_NOSWAP is written in the first shader instruction, the first
         *  TMU write cannot occur before the 4th shader instruction."
         */
        int last_tmu_noswap = -10;
        for (int i = 0; i < num_inst; i++) {
                uint64_t inst = insts[i];

                if ((i - last_tmu_noswap) <= 3 &&
                    (writes_reg(inst, QPU_W_TMU0_S) ||
                     writes_reg(inst, QPU_W_TMU1_S))) {
                        fail_instr(inst, "TMU write too soon after TMU_NOSWAP");
                }

                if (writes_reg(inst, QPU_W_TMU_NOSWAP))
                        last_tmu_noswap = i;
        }
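        /* Note for the next check: the add pipe normally writes regfile A and
         * the mul pipe regfile B, and the QPU_WS bit swaps them, so both
         * write addresses have to be decoded before comparing against the
         * next instruction's reads.
         */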
        /* "An instruction must not read from a location in physical regfile A
         *  or B that was written to by the previous instruction."
         */
        for (int i = 0; i < num_inst - 1; i++) {
                uint64_t inst = insts[i];
                uint32_t add_waddr = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
                uint32_t mul_waddr = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
                uint32_t waddr_a, waddr_b;

                if (inst & QPU_WS) {
                        waddr_b = add_waddr;
                        waddr_a = mul_waddr;
                } else {
                        waddr_a = add_waddr;
                        waddr_b = mul_waddr;
                }

                if ((waddr_a < 32 && reads_a_reg(insts[i + 1], waddr_a)) ||
                    (waddr_b < 32 && reads_b_reg(insts[i + 1], waddr_b))) {
                        fail_instr(insts[i + 1],
                                   "Reads physical reg too soon after write");
                }
        }

        /* "After an SFU lookup instruction, accumulator r4 must not be read
         *  in the following two instructions. Any other instruction that
         *  results in r4 being written (that is, TMU read, TLB read, SFU
         *  lookup) cannot occur in the two instructions following an SFU
         *  lookup."
         */
        int last_sfu_inst = -10;
        for (int i = 0; i < num_inst - 1; i++) {
                uint64_t inst = insts[i];
                uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

                if (i - last_sfu_inst <= 2 &&
                    (writes_sfu(inst) ||
                     sig == QPU_SIG_LOAD_TMU0 ||
                     sig == QPU_SIG_LOAD_TMU1 ||
                     sig == QPU_SIG_COLOR_LOAD)) {
                        fail_instr(inst, "R4 write too soon after SFU write");
                }

                if (writes_sfu(inst))
                        last_sfu_inst = i;
        }

        /* Start at 1 since the rotate checks look at the previous
         * instruction.
         */
        for (int i = 1; i < num_inst - 1; i++) {
                uint64_t inst = insts[i];

                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_SMALL_IMM &&
                    QPU_GET_FIELD(inst, QPU_SMALL_IMM) >=
                    QPU_SMALL_IMM_MUL_ROT) {
                        uint32_t mux_a = QPU_GET_FIELD(inst, QPU_MUL_A);
                        uint32_t mux_b = QPU_GET_FIELD(inst, QPU_MUL_B);

                        /* "The full horizontal vector rotate is only
                         *  available when both of the mul ALU input arguments
                         *  are taken from accumulators r0-r3."
                         */
                        if (mux_a > QPU_MUX_R3 || mux_b > QPU_MUX_R3) {
                                fail_instr(inst,
                                           "MUL rotate using non-accumulator "
                                           "input");
                        }

                        if (QPU_GET_FIELD(inst, QPU_SMALL_IMM) ==
                            QPU_SMALL_IMM_MUL_ROT) {
                                /* "An instruction that does a vector rotate
                                 *  by r5 must not immediately follow an
                                 *  instruction that writes to r5."
                                 */
                                if (writes_reg(insts[i - 1], QPU_W_ACC5)) {
                                        fail_instr(inst,
                                                   "vector rotate by r5 "
                                                   "immediately after r5 "
                                                   "write");
                                }
                        }

                        /* "An instruction that does a vector rotate must not
                         *  immediately follow an instruction that writes to
                         *  the accumulator that is being rotated."
                         */
                        if (writes_reg(insts[i - 1], QPU_W_ACC0 + mux_a) ||
                            writes_reg(insts[i - 1], QPU_W_ACC0 + mux_b)) {
                                fail_instr(inst,
                                           "vector rotate of value written "
                                           "in previous instruction");
                        }
                }
        }

        /* "After an instruction that does a TLB Z write, the multisample mask
         *  must not be read as an instruction input argument in the following
         *  two instructions. The TLB Z write instruction can, however, be
         *  followed immediately by a TLB color write."
         */
        for (int i = 0; i < num_inst - 2; i++) {
                uint64_t inst = insts[i];

                if (writes_reg(inst, QPU_W_TLB_Z) &&
                    (reads_a_reg(insts[i + 1], QPU_R_MS_REV_FLAGS) ||
                     reads_a_reg(insts[i + 2], QPU_R_MS_REV_FLAGS))) {
                        fail_instr(inst, "TLB Z write followed by MS mask read");
                }
        }
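        /* The check below would reject, for example, an instruction whose add
         * op writes QPU_W_TMU0_S while its mul op writes QPU_W_SFU_RECIP:
         * two coupled peripheral accesses in one instruction (assuming
         * qpu_num_sf_accesses() counts both kinds of access).
         */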
        /* "A single instruction can only perform a maximum of one of the
         *  following closely coupled peripheral accesses in a single
         *  instruction: TMU write, TMU read, TLB write, TLB read, TLB
         *  combined color read and write, SFU write, Mutex read or Semaphore
         *  access."
         */
        for (int i = 0; i < num_inst; i++) {
                uint64_t inst = insts[i];

                if (qpu_num_sf_accesses(inst) > 1)
                        fail_instr(inst, "Single instruction writes SFU twice");
        }

        /* "The uniform base pointer can be written (from SIMD element 0) by
         *  the processor to reset the stream; there must be at least two
         *  nonuniform-accessing instructions following a pointer change
         *  before uniforms can be accessed once more."
         */
        int last_unif_pointer_update = -3;
        for (int i = 0; i < num_inst; i++) {
                uint64_t inst = insts[i];
                uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
                uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

                if (reads_reg(inst, QPU_R_UNIF) &&
                    i - last_unif_pointer_update <= 2) {
                        fail_instr(inst,
                                   "uniform read too soon after pointer update");
                }

                if (waddr_add == QPU_W_UNIFORMS_ADDRESS ||
                    waddr_mul == QPU_W_UNIFORMS_ADDRESS)
                        last_unif_pointer_update = i;
        }
}