void shader::add_gpr_array(unsigned gpr_start, unsigned gpr_count,
                           unsigned comp_mask) {
	unsigned chan = 0;

	/* register a separately addressed gpr_array for every channel
	 * selected by comp_mask */
	while (comp_mask) {
		if (comp_mask & 1) {
			gpr_array *a = new gpr_array(
					sel_chan(gpr_start, chan), gpr_count);

			SB_DUMP_PASS( sblog << "add_gpr_array: @" << a->base_gpr
					<< " [" << a->array_size << "]\n";
			);

			gpr_arrays.push_back(a);
		}
		comp_mask >>= 1;
		++chan;
	}
}
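/*
 * Illustrative sketch, not part of the driver: add_gpr_array() above walks
 * comp_mask one bit at a time, registering a gpr_array for every selected
 * channel. The standalone snippet below shows the same bit-walking pattern
 * with plain integers; walk_comp_mask is a hypothetical name used only for
 * this example.
 */
#include <cstdio>

static void walk_comp_mask(unsigned comp_mask)
{
	unsigned chan = 0;
	while (comp_mask) {
		if (comp_mask & 1)
			printf("channel %u selected\n", chan);
		comp_mask >>= 1;
		++chan;
	}
}

/* walk_comp_mask(0xB) visits channels 0, 1 and 3 (x, y, w). */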
void bc_finalizer::finalize_alu_group(alu_group_node* g, node *prev_node) {

	alu_node *last = NULL;
	alu_group_node *prev_g = NULL;
	bool add_nop = false;

	if (prev_node && prev_node->is_alu_group()) {
		prev_g = static_cast<alu_group_node*>(prev_node);
	}

	for (node_iterator I = g->begin(), E = g->end(); I != E; ++I) {
		alu_node *n = static_cast<alu_node*>(*I);
		unsigned slot = n->bc.slot;
		value *d = n->dst.empty() ? NULL : n->dst[0];

		/* special registers (e.g. the MOVA/geometry-emit targets) don't
		 * occupy a GPR destination */
		if (d && d->is_special_reg()) {
			assert((n->bc.op_ptr->flags & AF_MOVA) || d->is_geometry_emit());
			d = NULL;
		}

		sel_chan fdst = d ? d->get_final_gpr() : sel_chan(0, 0);

		if (d) {
			assert(fdst.chan() == slot || slot == SLOT_TRANS);
		}

		n->bc.dst_gpr = fdst.sel();
		n->bc.dst_chan = d ? fdst.chan() : slot < SLOT_TRANS ? slot : 0;

		/* relative destination: make sure the whole array fits in the
		 * reported GPR count */
		if (d && d->is_rel() && d->rel && !d->rel->is_const()) {
			n->bc.dst_rel = 1;
			update_ngpr(d->array->gpr.sel() + d->array->array_size - 1);
		} else {
			n->bc.dst_rel = 0;
		}

		n->bc.write_mask = d != NULL;
		n->bc.last = 0;

		if (n->bc.op_ptr->flags & AF_PRED) {
			n->bc.update_pred = (n->dst[1] != NULL);
			n->bc.update_exec_mask = (n->dst[2] != NULL);
		}

		// FIXME handle predication here
		n->bc.pred_sel = PRED_SEL_OFF;

		update_ngpr(n->bc.dst_gpr);

		add_nop |= finalize_alu_src(g, n, prev_g);

		last = n;
	}

	if (add_nop) {
		if (sh.get_ctx().r6xx_gpr_index_workaround) {
			insert_rv6xx_load_ar_workaround(g);
		}
	}

	/* mark the final instruction of the group */
	last->bc.last = 1;
}
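/*
 * Illustrative sketch, not part of the driver: the dst_chan selection above
 * keeps the allocated channel when the instruction has a destination, writes
 * to its own slot's channel otherwise, and falls back to channel 0 for the
 * trans slot. The helper below is a hypothetical standalone version of that
 * ternary, assuming the usual slot order x=0, y=1, z=2, w=3, trans=4.
 */
static unsigned pick_dst_chan(bool has_dst, unsigned final_chan, unsigned slot)
{
	const unsigned slot_trans = 4;		/* assumed trans-slot index for this sketch */

	if (has_dst)
		return final_chan;		/* channel assigned by register allocation */
	return slot < slot_trans ? slot : 0;	/* vector slots write their own channel */
}

/* pick_dst_chan(false, 0, 2) == 2 (z slot), pick_dst_chan(false, 0, 4) == 0 (trans). */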
value* shader::get_kcache_value(unsigned bank, unsigned index, unsigned chan) {
	return get_ro_value(kcache_values, VLK_KCACHE,
	                    sel_chan((bank << 12) | index, chan));
}
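/*
 * Illustrative sketch, not part of the driver: get_kcache_value() above keys
 * a read-only kcache value by packing the constant-buffer bank into the bits
 * above the 12-bit index before wrapping it in a sel_chan. make_kcache_key is
 * a hypothetical standalone helper showing why distinct (bank, index) pairs
 * cannot collide as long as the index fits in 12 bits.
 */
#include <cassert>

static unsigned make_kcache_key(unsigned bank, unsigned index)
{
	assert(index < (1u << 12));	/* index occupies the low 12 bits */
	return (bank << 12) | index;	/* bank occupies the bits above them */
}

/* make_kcache_key(1, 7) == 0x1007, make_kcache_key(0, 7) == 0x0007. */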