/* Performs the CSE lookup for one instruction.
 *
 * If an equivalent instruction was recorded earlier, returns that earlier
 * instruction so the caller can reuse its destination.  Otherwise the
 * instruction is added to the hash table (keyed on opcode, sources, and the
 * implicit-argument generation counter) and NULL is returned.
 */
static struct qinst *
vc4_find_cse(struct vc4_compile *c, struct hash_table *ht,
             struct qinst *inst, uint32_t sf_count, uint32_t r4_count)
{
        /* Only temp-writing, non-MOV instructions whose sources fit in the
         * key's src[] array are CSE candidates.
         */
        if (inst->dst.file != QFILE_TEMP ||
            inst->op == QOP_MOV ||
            qir_get_op_nsrc(inst->op) > 4) {
                return NULL;
        }

        /* memset the whole key so struct padding doesn't make otherwise
         * equal keys hash differently.
         */
        struct inst_key key;
        memset(&key, 0, sizeof(key));
        key.op = inst->op;
        memcpy(key.src, inst->src,
               qir_get_op_nsrc(inst->op) * sizeof(key.src[0]));

        /* Instructions reading implicit state (condition flags or r4) only
         * match when that state comes from the same generation of writes,
         * which the caller tracks with sf_count/r4_count.
         * NOTE(review): both counters land in the same field, so an inst
         * that depends on flags AND reads r4 would lose the sf generation —
         * presumably no op does both; confirm.
         */
        if (qir_depends_on_flags(inst))
                key.implicit_arg_update_count = sf_count;
        if (qir_reads_r4(inst))
                key.implicit_arg_update_count = r4_count;

        uint32_t hash = _mesa_hash_data(&key, sizeof(key));
        struct hash_entry *entry =
                _mesa_hash_table_search_pre_hashed(ht, hash, &key);
        if (entry) {
                if (debug) {
                        fprintf(stderr, "CSE found match:\n");
                        fprintf(stderr, " Original inst: ");
                        qir_dump_inst(c, entry->data);
                        fprintf(stderr, "\n");
                        fprintf(stderr, " Our inst: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, "\n");
                }
                return entry->data;
        }

        /* No match: remember this instruction.  The key is rallocated off
         * the table so it is freed along with it.
         */
        struct inst_key *alloc_key = ralloc(ht, struct inst_key);
        if (!alloc_key)
                return NULL;
        memcpy(alloc_key, &key, sizeof(*alloc_key));
        _mesa_hash_table_insert_pre_hashed(ht, hash, alloc_key, inst);

        if (debug) {
                fprintf(stderr, "Added to CSE HT: ");
                qir_dump_inst(c, inst);
                fprintf(stderr, "\n");
        }

        return NULL;
}
static bool has_nonremovable_reads(struct vc4_compile *c, struct qinst *inst) { for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { if (inst->src[i].file == QFILE_VPM) { uint32_t attr = inst->src[i].index / 4; uint32_t offset = (inst->src[i].index % 4) * 4; if (c->vattr_sizes[attr] != offset + 4) return true; /* Can't get rid of the last VPM read, or the * simulator (at least) throws an error. */ uint32_t total_size = 0; for (uint32_t i = 0; i < ARRAY_SIZE(c->vattr_sizes); i++) total_size += c->vattr_sizes[i]; if (total_size == 4) return true; } if (inst->src[i].file == QFILE_VARY && c->input_semantics[inst->src[i].index].semantic == 0xff) { return true; } } return false; }
/* Reports whether executing "inst" consumes a uniform value, either through
 * an explicit QFILE_UNIF source or because it is a texture op (which
 * qir_is_tex() identifies and which count as uniform readers here).
 */
static bool
inst_reads_a_uniform(struct qinst *inst)
{
        if (qir_is_tex(inst))
                return true;

        int nsrc = qir_get_op_nsrc(inst->op);
        for (int s = 0; s < nsrc; s++) {
                if (inst->src[s].file == QFILE_UNIF)
                        return true;
        }

        return false;
}
/* Common subexpression elimination over the instruction stream: an
 * instruction whose value was already computed earlier is rewritten into a
 * MOV from the earlier result.
 *
 * Returns true if any instruction was rewritten.
 */
bool
qir_opt_cse(struct vc4_compile *c)
{
        bool progress = false;
        /* Generation counters for implicit state (SF writes, r4 writes):
         * CSE keys of flag/r4 readers only match within one generation.
         */
        uint32_t sf_count = 0, r4_count = 0;

        struct hash_table *ht = _mesa_hash_table_create(NULL, NULL,
                                                        inst_key_equals);
        if (!ht)
                return false;

        list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                /* Side-effecting instructions (and TLB color reads, which
                 * are ordered) neither enter the table nor reuse entries.
                 */
                if (qir_has_side_effects(c, inst) ||
                    qir_has_side_effect_reads(c, inst) ||
                    inst->op == QOP_TLB_COLOR_READ) {
                        continue;
                }

                if (inst->sf) {
                        /* Writing the flags starts a new SF generation. */
                        sf_count++;
                } else {
                        struct qinst *cse = vc4_find_cse(c, ht, inst,
                                                         sf_count,
                                                         r4_count);
                        if (cse) {
                                /* Turn this inst into a MOV from the
                                 * earlier destination.  Note nsrc is read
                                 * before the op is changed to MOV.
                                 */
                                inst->src[0] = cse->dst;
                                for (int i = 1; i < qir_get_op_nsrc(inst->op); i++)
                                        inst->src[i] = c->undef;
                                inst->op = QOP_MOV;
                                progress = true;

                                if (debug) {
                                        fprintf(stderr, " Turned into: ");
                                        qir_dump_inst(c, inst);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (qir_writes_r4(inst))
                        r4_count++;
        }

        ralloc_free(ht);

        return progress;
}
/* Returns true when "a" and "b" are guaranteed to produce the same value:
 * identical opcode, neither reads the condition flags, and all sources
 * match with none of them coming from a file whose value changes on
 * re-read.
 */
static bool
inst_result_equals(struct qinst *a, struct qinst *b)
{
        if (a->op != b->op)
                return false;

        if (qir_depends_on_flags(a) || qir_depends_on_flags(b))
                return false;

        int nsrc = qir_get_op_nsrc(a->op);
        for (int s = 0; s < nsrc; s++) {
                if (!qir_reg_equals(a->src[s], b->src[s]))
                        return false;
                if (src_file_varies_on_reread(a->src[s]) ||
                    src_file_varies_on_reread(b->src[s]))
                        return false;
        }

        return true;
}
/* Returns true if "writer" overwrites a temp that "inst" reads.
 *
 * If the sources get overwritten, stop tracking the last instruction
 * writing SF.
 */
static bool
inst_srcs_updated(struct qinst *inst, struct qinst *writer)
{
        /* Only temp writes can clobber a tracked source. */
        if (writer->dst.file != QFILE_TEMP)
                return false;

        for (int s = 0; s < qir_get_op_nsrc(inst->op); s++) {
                if (inst->src[s].file == QFILE_TEMP &&
                    inst->src[s].index == writer->dst.index) {
                        return true;
                }
        }

        return false;
}
/* Folds an instruction whose sources are all compile-time constants
 * (QUNIFORM_CONSTANT uniforms or small immediates) into a MOV of the
 * computed value.  Returns true if the instruction was rewritten.
 */
static bool
constant_fold(struct vc4_compile *c, struct qinst *inst)
{
        int nsrc = qir_get_op_nsrc(inst->op);
        /* NOTE(review): VLA sized by nsrc — assumed small (bounded by the
         * QIR op table); confirm.
         */
        uint32_t ui[nsrc];

        /* Gather the constant value of every source, or bail if any source
         * isn't a known constant.
         */
        for (int i = 0; i < nsrc; i++) {
                struct qreg reg = inst->src[i];
                if (reg.file == QFILE_UNIF &&
                    c->uniform_contents[reg.index] == QUNIFORM_CONSTANT) {
                        ui[i] = c->uniform_data[reg.index];
                } else if (reg.file == QFILE_SMALL_IMM) {
                        /* For small immediates the value itself is stored
                         * in .index.
                         */
                        ui[i] = reg.index;
                } else {
                        return false;
                }
        }

        uint32_t result = 0;
        switch (inst->op) {
        case QOP_SHR:
                /* NOTE(review): ui[1] >= 32 would be UB in C; presumably
                 * shift counts here are always in range — confirm whether
                 * masking with & 31 is wanted.
                 */
                result = ui[0] >> ui[1];
                break;
        default:
                return false;
        }

        /* Rewrite into a MOV of a freshly-created constant uniform. */
        dump_from(c, inst);

        inst->src[0] = qir_uniform_ui(c, result);
        for (int i = 1; i < nsrc; i++)
                inst->src[i] = c->undef;
        inst->op = QOP_MOV;

        dump_to(c, inst);

        return true;
}
/* Sets up the def/use arrays for when variables are used-before-defined or * defined-before-used in the block. * * Also initializes the temp_start/temp_end to cover just the instruction IPs * where the variable is used, which will be extended later in * qir_compute_start_end(). */ static void qir_setup_def_use(struct vc4_compile *c) { struct hash_table *partial_update_ht = _mesa_hash_table_create(c, int_hash, int_compare); int ip = 0; qir_for_each_block(block, c) { block->start_ip = ip; _mesa_hash_table_clear(partial_update_ht, NULL); qir_for_each_inst(inst, block) { for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) qir_setup_use(c, block, ip, inst->src[i]); qir_setup_def(c, block, ip, partial_update_ht, inst); if (inst->sf) sf_state_clear(partial_update_ht); switch (inst->op) { case QOP_FRAG_Z: case QOP_FRAG_W: /* The payload registers have values * implicitly loaded at the start of the * program. */ if (inst->dst.file == QFILE_TEMP) c->temp_start[inst->dst.index] = 0; break; default: break; } ip++; } block->end_ip = ip; }
/* Dead code elimination: walks the instruction list backwards removing
 * temp writes whose results are never read and that have no side effects.
 *
 * Returns true if anything was removed.
 */
bool
qir_opt_dead_code(struct vc4_compile *c)
{
        bool progress = false;
        bool debug = false;
        /* NOTE(review): calloc result is not checked; a failure would
         * dereference NULL below.
         */
        bool *used = calloc(c->num_temps, sizeof(bool));
        struct simple_node *node, *t;

        /* Reverse walk so removing one def can expose earlier dead defs in
         * the same pass; "t" caches prev because "node" may be unlinked.
         */
        for (node = c->instructions.prev, t = node->prev;
             &c->instructions != node;
             node = t, t = t->prev) {
                struct qinst *inst = (struct qinst *)node;

                if (inst->dst.file == QFILE_TEMP &&
                    !used[inst->dst.index] &&
                    !qir_has_side_effects(inst)) {
                        if (debug) {
                                fprintf(stderr, "Removing: ");
                                qir_dump_inst(inst);
                                fprintf(stderr, "\n");
                        }
                        qir_remove_instruction(inst);
                        progress = true;
                        continue;
                }

                /* The instruction survives, so its temp reads are live. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                used[inst->src[i].index] = true;
                }
        }

        free(used);

        return progress;
}
/* Translates the compiled QIR instruction stream into QPU machine
 * instructions: emits VPM read/write setup for vertex stages, maps each
 * QIR op to its QPU encoding, schedules the result, and appends the
 * mandatory program-end sequence.
 */
void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;
        /* Map from the QIR ops enum order to QPU unpack bits. */
        static const uint32_t unpack_map[] = {
                QPU_UNPACK_8A,
                QPU_UNPACK_8B,
                QPU_UNPACK_8C,
                QPU_UNPACK_8D,
                QPU_UNPACK_16A_TO_F32,
                QPU_UNPACK_16B_TO_F32,
        };

        list_inithead(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);

                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                /* Simple ALU ops translate 1:1; anything not listed here is
                 * handled by an explicit case below.
                 */
                static const struct {
                        uint32_t op;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name}
#define M(name) [QOP_##name] = {QPU_M_##name}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        A(ADD),
                        A(SUB),
                        A(SHL),
                        A(SHR),
                        A(ASR),
                        A(MIN),
                        A(MAX),
                        A(AND),
                        A(OR),
                        A(XOR),
                        A(NOT),

                        M(FMUL),
                        M(MUL24),
                };

                /* Resolve each QIR source to a concrete QPU register/mux. */
                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr = qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
                        case QFILE_VPM:
                                /* VPM reads must be in increasing order. */
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                /* Resolve the destination. */
                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                case QOP_SEL_X_0_CS:
                case QOP_SEL_X_0_CC:
                        /* Conditional MOV of src[0], then the opposite
                         * condition zeroes the dest via XOR.
                         */
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                case QOP_SEL_X_Y_CS:
                case QOP_SEL_X_Y_CC:
                        /* Two conditional MOVs with complementary
                         * conditions select between src[0] and src[1].
                         */
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);

                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        /* SFU ops: write the argument to the SFU register,
                         * result appears in r4.
                         */
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

                case QOP_PACK_8888_F:
                        queue(c, qpu_m_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8888,
                                                       QPU_PACK);
                        break;

                case QOP_PACK_8A_F:
                case QOP_PACK_8B_F:
                case QOP_PACK_8C_F:
                case QOP_PACK_8D_F:
                        queue(c, qpu_m_MOV(dst, src[0]) |
                              QPU_PM |
                              QPU_SET_FIELD(QPU_PACK_MUL_8A +
                                            qinst->op - QOP_PACK_8A_F,
                                            QPU_PACK));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP),
                                           src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_DIRECT:
                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S),
                                           src[0], src[1]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);
                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_UNPACK_8A_F:
                case QOP_UNPACK_8B_F:
                case QOP_UNPACK_8C_F:
                case QOP_UNPACK_8D_F:
                case QOP_UNPACK_16A_F:
                case QOP_UNPACK_16B_F: {
                        if (src[0].mux == QPU_MUX_R4) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                               (qinst->op -
                                                                QOP_UNPACK_8A_F),
                                                               QPU_UNPACK);
                        } else {
                                assert(src[0].mux == QPU_MUX_A);

                                /* Since we're setting the pack bits, if the
                                 * destination is in A it would get re-packed.
                                 */
                                queue(c, qpu_a_FMAX((dst.mux == QPU_MUX_A ?
                                                     qpu_rb(31) : dst),
                                                    src[0], src[0]));
                                *last_inst(c) |=
                                        QPU_SET_FIELD(unpack_map[qinst->op -
                                                                 QOP_UNPACK_8A_F],
                                                      QPU_UNPACK);

                                if (dst.mux == QPU_MUX_A) {
                                        queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                                }
                        }
                }
                        break;

                case QOP_UNPACK_8A_I:
                case QOP_UNPACK_8B_I:
                case QOP_UNPACK_8C_I:
                case QOP_UNPACK_8D_I:
                case QOP_UNPACK_16A_I:
                case QOP_UNPACK_16B_I: {
                        assert(src[0].mux == QPU_MUX_A);

                        /* Since we're setting the pack bits, if the
                         * destination is in A it would get re-packed.
                         */
                        queue(c, qpu_a_MOV((dst.mux == QPU_MUX_A ?
                                            qpu_rb(31) : dst), src[0]));
                        *last_inst(c) |=
                                QPU_SET_FIELD(unpack_map[qinst->op -
                                                         QOP_UNPACK_8A_I],
                                              QPU_UNPACK);

                        if (dst.mux == QPU_MUX_A) {
                                queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                        }
                }
                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);

                        if (qir_is_mul(qinst)) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                                if (qinst->dst.pack) {
                                        *last_inst(c) |= QPU_PM;
                                        *last_inst(c) |= QPU_SET_FIELD(qinst->dst.pack,
                                                                       QPU_PACK);
                                }
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                                if (qinst->dst.pack) {
                                        assert(dst.mux == QPU_MUX_A);
                                        *last_inst(c) |= QPU_SET_FIELD(qinst->dst.pack,
                                                                       QPU_PACK);
                                }
                        }

                        break;
                }

                if (qinst->sf) {
                        assert(!qir_is_multi_instruction(qinst));
                        *last_inst(c) |= QPU_SF;
                }
        }

        qpu_schedule_instructions(c);

        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

        /* Tag the last instruction as program end, then pad with the two
         * required delay-slot NOPs.
         */
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                /* Fragment shaders release the tile scoreboard on their
                 * final instruction.
                 */
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}
/* Tries to rewrite each TEMP source of "inst" to read directly from the
 * source of a copy MOV, removing the dependency on the intermediate temp.
 * Returns true if any source was rewritten.
 */
static bool
try_copy_prop(struct vc4_compile *c, struct qinst *inst, struct qinst **movs)
{
        bool debug = false;
        bool progress = false;

        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                if (inst->src[i].file != QFILE_TEMP)
                        continue;

                /* We have two ways of finding MOVs we can copy propagate
                 * from.  One is if it's an SSA def: then we can reuse it from
                 * any block in the program, as long as its source is also an
                 * SSA def.  Alternatively, if it's in the "movs" array
                 * tracked within the block, then we know the sources for it
                 * haven't been changed since we saw the instruction within
                 * our block.
                 */
                struct qinst *mov = movs[inst->src[i].index];
                if (!mov) {
                        if (!is_copy_mov(c->defs[inst->src[i].index]))
                                continue;
                        mov = c->defs[inst->src[i].index];

                        if (mov->src[0].file == QFILE_TEMP &&
                            !c->defs[mov->src[0].index])
                                continue;
                }

                /* Mul rotation's source needs to be in an r0-r3 accumulator,
                 * so no uniforms or regfile-a/r4 unpacking allowed.
                 */
                if (inst->op == QOP_ROT_MUL &&
                    (mov->src[0].file != QFILE_TEMP ||
                     mov->src[0].pack))
                        continue;

                uint8_t unpack;
                if (mov->src[0].pack) {
                        /* Make sure that the meaning of the unpack
                         * would be the same between the two
                         * instructions.
                         */
                        if (qir_is_float_input(inst) !=
                            qir_is_float_input(mov)) {
                                continue;
                        }

                        /* There's only one unpack field, so make sure
                         * this instruction doesn't already use it.
                         */
                        bool already_has_unpack = false;
                        for (int j = 0; j < qir_get_op_nsrc(inst->op); j++) {
                                if (inst->src[j].pack)
                                        already_has_unpack = true;
                        }
                        if (already_has_unpack)
                                continue;

                        /* A destination pack requires the PM bit to
                         * be set to a specific value already, which
                         * may be different from ours.
                         */
                        if (inst->dst.pack)
                                continue;

                        unpack = mov->src[0].pack;
                } else {
                        unpack = inst->src[i].pack;
                }

                if (debug) {
                        fprintf(stderr, "Copy propagate: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, "\n");
                }

                /* Perform the rewrite, carrying the chosen unpack mode. */
                inst->src[i] = mov->src[0];
                inst->src[i].pack = unpack;

                if (debug) {
                        fprintf(stderr, "to: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, "\n");
                }

                progress = true;
        }

        return progress;
}
/* Dead code elimination (reverse walk), also trimming dead VPM attribute
 * reads, dead SF writes, and the setup instructions of eliminated texture
 * fetches.  Returns true if anything changed.
 */
bool
qir_opt_dead_code(struct vc4_compile *c)
{
        bool progress = false;
        /* NOTE(review): calloc result is not checked; a failure would
         * dereference NULL below.
         */
        bool *used = calloc(c->num_temps, sizeof(bool));
        /* Whether some later instruction reads the flags this one would set. */
        bool sf_used = false;
        /* Whether we're eliminating texture setup currently. */
        bool dce_tex = false;
        struct simple_node *node, *t;

        /* Reverse walk; "t" caches prev since "node" may be unlinked. */
        for (node = c->instructions.prev, t = node->prev;
             &c->instructions != node;
             node = t, t = t->prev) {
                struct qinst *inst = (struct qinst *)node;

                if (inst->dst.file == QFILE_TEMP &&
                    !used[inst->dst.index] &&
                    !inst->sf &&
                    (!qir_has_side_effects(c, inst) ||
                     inst->op == QOP_TEX_RESULT) &&
                    !has_nonremovable_reads(c, inst)) {
                        if (inst->op == QOP_TEX_RESULT) {
                                /* Dropping the result also lets us drop the
                                 * preceding coordinate setup.
                                 */
                                dce_tex = true;
                                c->num_texture_samples--;
                        }

                        /* Shrink the recorded size of any attribute whose
                         * last VPM read is being removed.
                         */
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file != QFILE_VPM)
                                        continue;
                                uint32_t attr = inst->src[i].index / 4;
                                uint32_t offset = (inst->src[i].index % 4) * 4;

                                if (c->vattr_sizes[attr] == offset + 4) {
                                        c->num_inputs--;
                                        c->vattr_sizes[attr] -= 4;
                                }
                        }

                        dce(c, inst);
                        progress = true;
                        continue;
                }

                if (qir_depends_on_flags(inst))
                        sf_used = true;
                if (inst->sf) {
                        if (!sf_used) {
                                /* Nothing between here and the next SF write
                                 * reads the flags, so drop this SF update.
                                 */
                                if (debug) {
                                        fprintf(stderr, "Removing SF on: ");
                                        qir_dump_inst(c, inst);
                                        fprintf(stderr, "\n");
                                }
                                inst->sf = false;
                                progress = true;
                        }
                        sf_used = false;
                }

                if (inst->op == QOP_TEX_RESULT)
                        dce_tex = false;

                if (dce_tex && (inst->op == QOP_TEX_S ||
                                inst->op == QOP_TEX_T ||
                                inst->op == QOP_TEX_R ||
                                inst->op == QOP_TEX_B ||
                                inst->op == QOP_TEX_DIRECT)) {
                        dce(c, inst);
                        progress = true;
                        continue;
                }

                /* The instruction survives, so its temp reads are live. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                used[inst->src[i].index] = true;
                }
        }

        free(used);

        return progress;
}
/* Dead code elimination (reverse walk), also removing unread QOP_SF flag
 * writes and the setup instructions of eliminated texture fetches.
 * Returns true if anything was removed.
 */
bool
qir_opt_dead_code(struct vc4_compile *c)
{
        bool progress = false;
        /* NOTE(review): calloc result is not checked; a failure would
         * dereference NULL below.
         */
        bool *used = calloc(c->num_temps, sizeof(bool));
        /* Whether some later instruction reads the flags. */
        bool sf_used = false;
        /* Whether we're eliminating texture setup currently. */
        bool dce_tex = false;
        struct simple_node *node, *t;

        /* Reverse walk; "t" caches prev since "node" may be unlinked. */
        for (node = c->instructions.prev, t = node->prev;
             &c->instructions != node;
             node = t, t = t->prev) {
                struct qinst *inst = (struct qinst *)node;

                if (inst->dst.file == QFILE_TEMP &&
                    !used[inst->dst.index] &&
                    (!qir_has_side_effects(inst) ||
                     inst->op == QOP_TEX_RESULT)) {
                        if (inst->op == QOP_TEX_RESULT) {
                                /* Dropping the result also lets us drop the
                                 * preceding coordinate setup.
                                 */
                                dce_tex = true;
                                c->num_texture_samples--;
                        }

                        dce(c, inst);
                        progress = true;
                        continue;
                }

                if (qir_depends_on_flags(inst))
                        sf_used = true;
                if (inst->op == QOP_SF) {
                        if (!sf_used) {
                                /* No flag reader between here and the next
                                 * SF write, so the SF itself is dead.
                                 */
                                dce(c, inst);
                                progress = true;
                                continue;
                        }
                        sf_used = false;
                }

                if (inst->op == QOP_TEX_RESULT)
                        dce_tex = false;

                if (dce_tex && (inst->op == QOP_TEX_S ||
                                inst->op == QOP_TEX_T ||
                                inst->op == QOP_TEX_R ||
                                inst->op == QOP_TEX_B)) {
                        dce(c, inst);
                        progress = true;
                        continue;
                }

                /* The instruction survives, so its temp reads are live. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                used[inst->src[i].index] = true;
                }
        }

        free(used);

        return progress;
}
/** * Returns a mapping from QFILE_TEMP indices to struct qpu_regs. * * The return value should be freed by the caller. */ struct qpu_reg * vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c) { struct node_to_temp_map map[c->num_temps]; uint32_t temp_to_node[c->num_temps]; uint8_t class_bits[c->num_temps]; struct qpu_reg *temp_registers = calloc(c->num_temps, sizeof(*temp_registers)); /* If things aren't ever written (undefined values), just read from * r0. */ for (uint32_t i = 0; i < c->num_temps; i++) temp_registers[i] = qpu_rn(0); vc4_alloc_reg_set(vc4); struct ra_graph *g = ra_alloc_interference_graph(vc4->regs, c->num_temps); /* Compute the live ranges so we can figure out interference. */ qir_calculate_live_intervals(c); for (uint32_t i = 0; i < c->num_temps; i++) { map[i].temp = i; map[i].priority = c->temp_end[i] - c->temp_start[i]; } qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority); for (uint32_t i = 0; i < c->num_temps; i++) { temp_to_node[map[i].temp] = i; } /* Figure out our register classes and preallocated registers. We * start with any temp being able to be in any file, then instructions * incrementally remove bits that the temp definitely can't be in. */ memset(class_bits, CLASS_BIT_A | CLASS_BIT_B_OR_ACC | CLASS_BIT_R4, sizeof(class_bits)); int ip = 0; qir_for_each_inst_inorder(inst, c) { if (qir_writes_r4(inst)) { /* This instruction writes r4 (and optionally moves * its result to a temp), so nothing else can be * stored in r4 across it. */ for (int i = 0; i < c->num_temps; i++) { if (c->temp_start[i] < ip && c->temp_end[i] > ip) class_bits[i] &= ~CLASS_BIT_R4; } } else { /* R4 can't be written as a general purpose * register. (it's TMU_NOSWAP as a write address). 
*/ if (inst->dst.file == QFILE_TEMP) class_bits[inst->dst.index] &= ~CLASS_BIT_R4; } switch (inst->op) { case QOP_FRAG_Z: ra_set_node_reg(g, temp_to_node[inst->dst.index], AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2 + 1); break; case QOP_FRAG_W: ra_set_node_reg(g, temp_to_node[inst->dst.index], AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2); break; case QOP_ROT_MUL: assert(inst->src[0].file == QFILE_TEMP); class_bits[inst->src[0].index] &= ~CLASS_BIT_R0_R3; break; default: break; } if (inst->dst.pack && !qir_is_mul(inst)) { /* The non-MUL pack flags require an A-file dst * register. */ class_bits[inst->dst.index] &= CLASS_BIT_A; } /* Apply restrictions for src unpacks. The integer unpacks * can only be done from regfile A, while float unpacks can be * either A or R4. */ for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { if (inst->src[i].file == QFILE_TEMP && inst->src[i].pack) { if (qir_is_float_input(inst)) { class_bits[inst->src[i].index] &= CLASS_BIT_A | CLASS_BIT_R4; } else { class_bits[inst->src[i].index] &= CLASS_BIT_A; } } } ip++; } for (uint32_t i = 0; i < c->num_temps; i++) { int node = temp_to_node[i]; switch (class_bits[i]) { case CLASS_BIT_A | CLASS_BIT_B_OR_ACC | CLASS_BIT_R4: ra_set_node_class(g, node, vc4->reg_class_any); break; case CLASS_BIT_A | CLASS_BIT_B_OR_ACC: ra_set_node_class(g, node, vc4->reg_class_a_or_b_or_acc); break; case CLASS_BIT_A | CLASS_BIT_R4: ra_set_node_class(g, node, vc4->reg_class_r4_or_a); break; case CLASS_BIT_A: ra_set_node_class(g, node, vc4->reg_class_a); break; case CLASS_BIT_R0_R3: ra_set_node_class(g, node, vc4->reg_class_r0_r3); break; default: fprintf(stderr, "temp %d: bad class bits: 0x%x\n", i, class_bits[i]); abort(); break; } } for (uint32_t i = 0; i < c->num_temps; i++) { for (uint32_t j = i + 1; j < c->num_temps; j++) { if (!(c->temp_start[i] >= c->temp_end[j] || c->temp_start[j] >= c->temp_end[i])) { ra_add_node_interference(g, temp_to_node[i], temp_to_node[j]); } } } bool ok = ra_allocate(g); if (!ok) { fprintf(stderr, 
"Failed to register allocate:\n"); qir_dump(c); c->failed = true; return NULL; } for (uint32_t i = 0; i < c->num_temps; i++) { temp_registers[i] = vc4_regs[ra_get_node_reg(g, temp_to_node[i])]; /* If the value's never used, just write to the NOP register * for clarity in debug output. */ if (c->temp_start[i] == c->temp_end[i]) temp_registers[i] = qpu_ra(QPU_W_NOP); } ralloc_free(g); return temp_registers; }
void qir_lower_uniforms(struct vc4_compile *c) { struct hash_table *ht = _mesa_hash_table_create(c, index_hash, index_compare); /* Walk the instruction list, finding which instructions have more * than one uniform referenced, and add those uniform values to the * ht. */ list_for_each_entry(struct qinst, inst, &c->instructions, link) { uint32_t nsrc = qir_get_op_nsrc(inst->op); uint32_t count = 0; for (int i = 0; i < nsrc; i++) { if (inst->src[i].file == QFILE_UNIF) count++; } if (count <= 1) continue; for (int i = 0; i < nsrc; i++) { if (is_lowerable_uniform(inst, i)) add_uniform(ht, inst->src[i]); } } while (ht->entries) { /* Find the most commonly used uniform in instructions that * need a uniform lowered. */ uint32_t max_count = 0; uint32_t max_index = 0; struct hash_entry *entry; hash_table_foreach(ht, entry) { uint32_t count = (uintptr_t)entry->data; uint32_t index = (uintptr_t)entry->key; if (count > max_count) { max_count = count; max_index = index; } } /* Now, find the instructions using this uniform and make them * reference a temp instead. */ struct qreg temp = qir_get_temp(c); struct qreg unif = { QFILE_UNIF, max_index }; struct qinst *mov = qir_inst(QOP_MOV, temp, unif, c->undef); list_add(&mov->link, &c->instructions); c->defs[temp.index] = mov; list_for_each_entry(struct qinst, inst, &c->instructions, link) { uint32_t nsrc = qir_get_op_nsrc(inst->op); uint32_t count = 0; for (int i = 0; i < nsrc; i++) { if (inst->src[i].file == QFILE_UNIF) count++; } if (count <= 1) continue; for (int i = 0; i < nsrc; i++) { if (is_lowerable_uniform(inst, i) && inst->src[i].index == max_index) { inst->src[i] = temp; remove_uniform(ht, unif); count--; } } /* If the instruction doesn't need lowering any more, * then drop it from the list. */ if (count <= 1) { for (int i = 0; i < nsrc; i++) { if (is_lowerable_uniform(inst, i)) remove_uniform(ht, inst->src[i]); } } } }