Example #1
static bool
def_not_live_out(nir_ssa_def *def, void *state)
{
   nir_block *after = state;

   return !BITSET_TEST(after->live_in, def->live_index);
}
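
A hedged illustration of how such a callback is typically driven (the caller below is an assumption for illustration, not part of the original): NIR's nir_foreach_ssa_def() iterator invokes the callback on every SSA destination of an instruction and stops, returning false, as soon as the callback returns false.

/* Hypothetical caller, for illustration only: true iff none of the SSA
 * values written by `instr` are live on entry to `after`.
 * nir_foreach_ssa_def() stops and returns false as soon as the callback
 * returns false, so this holds only when every def passes the test above.
 */
static bool
instr_results_not_live_out(nir_instr *instr, nir_block *after)
{
   return nir_foreach_ssa_def(instr, def_not_live_out, after);
}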
Example #2
/**
 * \note This routine refers to derived texture matrix values to
 * compute the ENABLE_TEXMAT flags, but is only called on
 * _NEW_TEXTURE_OBJECT/STATE.  On changes to _NEW_TEXTURE_MATRIX,
 * the ENABLE_TEXMAT flags are updated by _mesa_update_texture_matrices,
 * above.
 *
 * \param ctx GL context.
 */
void
_mesa_update_texture_state(struct gl_context *ctx)
{
   struct gl_program *prog[MESA_SHADER_STAGES];
   int i;
   int old_max_unit = ctx->Texture._MaxEnabledTexImageUnit;
   BITSET_DECLARE(enabled_texture_units, MAX_COMBINED_TEXTURE_IMAGE_UNITS);

   memcpy(prog, ctx->_Shader->CurrentProgram, sizeof(prog));

   if (prog[MESA_SHADER_FRAGMENT] == NULL &&
       _mesa_arb_fragment_program_enabled(ctx)) {
      prog[MESA_SHADER_FRAGMENT] = ctx->FragmentProgram.Current;
   }

   /* TODO: only set this if there are actual changes */
   ctx->NewState |= _NEW_TEXTURE_OBJECT | _NEW_TEXTURE_STATE;

   ctx->Texture._GenFlags = 0x0;
   ctx->Texture._TexMatEnabled = 0x0;
   ctx->Texture._TexGenEnabled = 0x0;
   ctx->Texture._MaxEnabledTexImageUnit = -1;
   ctx->Texture._EnabledCoordUnits = 0x0;

   memset(&enabled_texture_units, 0, sizeof(enabled_texture_units));

   /* First, walk over our programs pulling in all the textures for them.
    * Programs dictate specific texture targets to be enabled, and for a draw
    * call to be valid they can't conflict about which texture targets are
    * used.
    */
   update_program_texture_state(ctx, prog, enabled_texture_units);

   /* Also pull in any textures necessary for fixed function fragment shading.
    */
   if (!prog[MESA_SHADER_FRAGMENT])
      update_ff_texture_state(ctx, enabled_texture_units);

   /* Now, clear out the _Current of any disabled texture units. */
   for (i = 0; i <= ctx->Texture._MaxEnabledTexImageUnit; i++) {
      if (!BITSET_TEST(enabled_texture_units, i))
         _mesa_reference_texobj(&ctx->Texture.Unit[i]._Current, NULL);
   }
   for (i = ctx->Texture._MaxEnabledTexImageUnit + 1; i <= old_max_unit; i++) {
      _mesa_reference_texobj(&ctx->Texture.Unit[i]._Current, NULL);
   }

   /* add fallback texture for SampleMapATI if there is nothing */
   if (_mesa_ati_fragment_shader_enabled(ctx) &&
       ctx->ATIFragmentShader.Current->Program)
      fix_missing_textures_for_atifs(ctx,
                                     ctx->ATIFragmentShader.Current->Program,
                                     enabled_texture_units);

   if (!prog[MESA_SHADER_FRAGMENT] || !prog[MESA_SHADER_VERTEX])
      update_texgen(ctx);
}
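
The enabled_texture_units scratch set above uses Mesa's util/bitset.h helpers. A simplified sketch of what those macros expand to (the real header differs in minor details such as casts and helper macros):

/* Simplified sketch of util/bitset.h, for reading the examples in this file. */
#define BITSET_WORD                 unsigned int
#define BITSET_WORDBITS             (sizeof(BITSET_WORD) * 8)
#define BITSET_WORDS(bits)          (((bits) + BITSET_WORDBITS - 1) / BITSET_WORDBITS)
#define BITSET_DECLARE(name, bits)  BITSET_WORD name[BITSET_WORDS(bits)]
#define BITSET_TEST(x, b)   (((x)[(b) / BITSET_WORDBITS] &   (1u << ((b) % BITSET_WORDBITS))) != 0)
#define BITSET_SET(x, b)     ((x)[(b) / BITSET_WORDBITS] |=  (1u << ((b) % BITSET_WORDBITS)))
#define BITSET_CLEAR(x, b)   ((x)[(b) / BITSET_WORDBITS] &= ~(1u << ((b) % BITSET_WORDBITS)))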
Example #3
/**
 * \note This routine refers to derived texture matrix values to
 * compute the ENABLE_TEXMAT flags, but is only called on
 * _NEW_TEXTURE.  On changes to _NEW_TEXTURE_MATRIX, the ENABLE_TEXMAT
 * flags are updated by _mesa_update_texture_matrices, above.
 *
 * \param ctx GL context.
 */
static void
update_texture_state( struct gl_context *ctx )
{
    struct gl_program *prog[MESA_SHADER_STAGES];
    int i;
    int old_max_unit = ctx->Texture._MaxEnabledTexImageUnit;
    BITSET_DECLARE(enabled_texture_units, MAX_COMBINED_TEXTURE_IMAGE_UNITS);

    for (i = 0; i < MESA_SHADER_STAGES; i++) {
        if (ctx->_Shader->CurrentProgram[i] &&
                ctx->_Shader->CurrentProgram[i]->LinkStatus) {
            prog[i] = ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i]->Program;
        } else {
            if (i == MESA_SHADER_FRAGMENT && ctx->FragmentProgram._Enabled)
                prog[i] = &ctx->FragmentProgram.Current->Base;
            else
                prog[i] = NULL;
        }
    }

    /* TODO: only set this if there are actual changes */
    ctx->NewState |= _NEW_TEXTURE;

    ctx->Texture._GenFlags = 0x0;
    ctx->Texture._TexMatEnabled = 0x0;
    ctx->Texture._TexGenEnabled = 0x0;
    ctx->Texture._MaxEnabledTexImageUnit = -1;
    ctx->Texture._EnabledCoordUnits = 0x0;

    memset(&enabled_texture_units, 0, sizeof(enabled_texture_units));

    /* First, walk over our programs pulling in all the textures for them.
     * Programs dictate specific texture targets to be enabled, and for a draw
     * call to be valid they can't conflict about which texture targets are
     * used.
     */
    update_program_texture_state(ctx, prog, enabled_texture_units);

    /* Also pull in any textures necessary for fixed function fragment shading.
     */
    if (!prog[MESA_SHADER_FRAGMENT])
        update_ff_texture_state(ctx, enabled_texture_units);

    /* Now, clear out the _Current of any disabled texture units. */
    for (i = 0; i <= ctx->Texture._MaxEnabledTexImageUnit; i++) {
        if (!BITSET_TEST(enabled_texture_units, i))
            _mesa_reference_texobj(&ctx->Texture.Unit[i]._Current, NULL);
    }
    for (i = ctx->Texture._MaxEnabledTexImageUnit + 1; i <= old_max_unit; i++) {
        _mesa_reference_texobj(&ctx->Texture.Unit[i]._Current, NULL);
    }

    if (!prog[MESA_SHADER_FRAGMENT] || !prog[MESA_SHADER_VERTEX])
        update_texgen(ctx);
}
Example #4
static void
qir_setup_use(struct vc4_compile *c, struct qblock *block, int ip,
              struct qreg src)
{
        int var = qir_reg_to_var(src);
        if (var == -1)
                return;

        c->temp_start[var] = MIN2(c->temp_start[var], ip);
        c->temp_end[var] = MAX2(c->temp_end[var], ip);

        /* The use[] bitset marks when the block makes
         * use of a variable without having completely
         * defined that variable within the block.
         */
        if (!BITSET_TEST(block->def, var))
                BITSET_SET(block->use, var);
}
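
The per-block use[] and def[] sets gathered here feed the standard backward liveness dataflow. A minimal sketch of that per-block step, assuming plain BITSET_WORD arrays of `words` words each (the helper itself is illustrative, not vc4's actual code):

#include <stdbool.h>
#include "util/bitset.h"

/* live_in = use | (live_out & ~def), applied word by word.  Returns true if
 * live_in grew, so the caller can iterate blocks to a fixed point.
 */
static bool
update_block_live_in(BITSET_WORD *live_in, const BITSET_WORD *live_out,
                     const BITSET_WORD *use, const BITSET_WORD *def,
                     unsigned words)
{
        bool progress = false;

        for (unsigned i = 0; i < words; i++) {
                BITSET_WORD new_live_in = use[i] | (live_out[i] & ~def[i]);

                if (new_live_in & ~live_in[i]) {
                        live_in[i] |= new_live_in;
                        progress = true;
                }
        }

        return progress;
}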
Example #5
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   assert(def->index < state->impl->ssa_alloc);
   assert(!BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   assert(def->parent_instr == state->instr);

   assert(def->num_components <= 4);

   list_validate(&def->uses);
   list_validate(&def->if_uses);

   ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
                                              ssa_def_validate_state);
   def_state->where_defined = state->impl;
   def_state->uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   def_state->if_uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);
   _mesa_hash_table_insert(state->ssa_defs, def, def_state);
}
Example #6
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;

   calculate_live_intervals();

   int num_vars = live_intervals->num_vars;
   BITSET_WORD *live = ralloc_array(NULL, BITSET_WORD, BITSET_WORDS(num_vars));

   foreach_block (block, cfg) {
      memcpy(live, live_intervals->bd[block->num].liveout,
             sizeof(BITSET_WORD) * BITSET_WORDS(num_vars));

      foreach_inst_in_block_reverse(fs_inst, inst, block) {
         if (inst->dst.file == GRF &&
             !inst->has_side_effects() &&
             !inst->writes_flag()) {
            bool result_live = false;

            if (inst->regs_written == 1) {
               int var = live_intervals->var_from_reg(&inst->dst);
               result_live = BITSET_TEST(live, var);
            } else {
               int var = live_intervals->var_from_vgrf[inst->dst.reg];
               for (int i = 0; i < inst->regs_written; i++) {
                  result_live = result_live || BITSET_TEST(live, var + i);
               }
            }

            if (!result_live) {
               progress = true;

               if (inst->writes_accumulator) {
                  inst->dst = fs_reg(retype(brw_null_reg(), inst->dst.type));
               } else {
                  inst->opcode = BRW_OPCODE_NOP;
                  continue;
               }
            }
         }

         if (inst->dst.file == GRF) {
            if (!inst->is_partial_write()) {
               int var = live_intervals->var_from_vgrf[inst->dst.reg];
               for (int i = 0; i < inst->regs_written; i++) {
                  BITSET_CLEAR(live, var + inst->dst.reg_offset + i);
               }
            }
         }

         for (int i = 0; i < inst->sources; i++) {
            if (inst->src[i].file == GRF) {
               int var = live_intervals->var_from_vgrf[inst->src[i].reg];

               for (int j = 0; j < inst->regs_read(this, i); j++) {
                  BITSET_SET(live, var + inst->src[i].reg_offset + j);
               }
            }
         }
      }
   }

   ralloc_free(live);

   if (progress)
      invalidate_live_intervals();

   return progress;
}
Example #7
static void
update_ff_texture_state(struct gl_context *ctx,
                        BITSET_WORD *enabled_texture_units)
{
    int unit;

    for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++) {
        struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
        GLbitfield mask;
        bool complete;

        if (texUnit->Enabled == 0x0)
            continue;

        /* If a shader already dictated what texture target was used for this
         * unit, just go along with it.
         */
        if (BITSET_TEST(enabled_texture_units, unit))
            continue;

        /* From the GL 4.4 compat specification, section 16.2 ("Texture Application"):
         *
         *     "Texturing is enabled or disabled using the generic Enable and
         *      Disable commands, respectively, with the symbolic constants
         *      TEXTURE_1D, TEXTURE_2D, TEXTURE_RECTANGLE, TEXTURE_3D, or
         *      TEXTURE_CUBE_MAP to enable the one-, two-, rectangular,
         *      three-dimensional, or cube map texture, respectively. If more
         *      than one of these textures is enabled, the first one enabled
         *      from the following list is used:
         *
         *      • cube map texture
         *      • three-dimensional texture
         *      • rectangular texture
         *      • two-dimensional texture
         *      • one-dimensional texture"
         *
         * Note that the TEXTURE_x_INDEX values are ordered from high to low priority.
         * Also:
         *
         *     "If a texture unit is disabled or has an invalid or incomplete
         *      texture (as defined in section 8.17) bound to it, then blending
         *      is disabled for that texture unit. If the texture environment
         *      for a given enabled texture unit references a disabled texture
         *      unit, or an invalid or incomplete texture that is bound to
         *      another unit, then the results of texture blending are
         *      undefined."
         */
        complete = false;
        mask = texUnit->Enabled;
        while (mask) {
            const int texIndex = u_bit_scan(&mask);
            struct gl_texture_object *texObj = texUnit->CurrentTex[texIndex];
            struct gl_sampler_object *sampler = texUnit->Sampler ?
                                                    texUnit->Sampler : &texObj->Sampler;

            if (!_mesa_is_texture_complete(texObj, sampler)) {
                _mesa_test_texobj_completeness(ctx, texObj);
            }
            if (_mesa_is_texture_complete(texObj, sampler)) {
                _mesa_reference_texobj(&texUnit->_Current, texObj);
                complete = true;
                break;
            }
        }

        if (!complete)
            continue;

        /* if we get here, we know this texture unit is enabled */
        BITSET_SET(enabled_texture_units, unit);
        ctx->Texture._MaxEnabledTexImageUnit =
            MAX2(ctx->Texture._MaxEnabledTexImageUnit, (int)unit);

        ctx->Texture._EnabledCoordUnits |= 1 << unit;

        update_tex_combine(ctx, texUnit);
    }
}
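
The priority selection above falls out of the bit ordering: u_bit_scan() (the same helper used in the loop) pops the lowest set bit of the mask first, and the TEXTURE_*_INDEX values are numbered from highest to lowest priority. A hypothetical helper, purely for illustration, that states the rule directly:

/* Illustration only (not a Mesa function): returns the highest-priority
 * enabled target index for a unit's Enabled mask, or -1 if none is enabled.
 */
static int
highest_priority_target(GLbitfield enabled)
{
    return enabled ? u_bit_scan(&enabled) : -1;
}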
Example #8
static void
qir_setup_def(struct vc4_compile *c, struct qblock *block, int ip,
              struct hash_table *partial_update_ht, struct qinst *inst)
{
        /* The def[] bitset marks when an initialization in a
         * block completely screens off previous updates of
         * that variable.
         */
        int var = qir_reg_to_var(inst->dst);
        if (var == -1)
                return;

        c->temp_start[var] = MIN2(c->temp_start[var], ip);
        c->temp_end[var] = MAX2(c->temp_end[var], ip);

        /* If we've already tracked this as a def, or already used it within
         * the block, there's nothing to do.
         */
        if (BITSET_TEST(block->use, var) || BITSET_TEST(block->def, var))
                return;

        /* Easy, common case: unconditional full register update. */
        if (inst->cond == QPU_COND_ALWAYS && !inst->dst.pack) {
                BITSET_SET(block->def, var);
                return;
        }

        /* Finally, look at the condition code and packing and mark it as a
         * def.  We need to make sure that we understand sequences of
         * instructions like:
         *
         *     mov.zs t0, t1
         *     mov.zc t0, t2
         *
         * or:
         *
         *     mmov t0.8a, t1
         *     mmov t0.8b, t2
         *     mmov t0.8c, t3
         *     mmov t0.8d, t4
         *
         * as defining the temp within the block, because otherwise dst's live
         * range will get extended up the control flow to the top of the
         * program.
         */
        struct partial_update_state *state =
                get_partial_update_state(partial_update_ht, inst);
        uint8_t mask = qir_channels_written(inst);

        if (inst->cond == QPU_COND_ALWAYS) {
                state->channels |= mask;
        } else {
                for (int i = 0; i < 4; i++) {
                        if (!(mask & (1 << i)))
                                continue;

                        if (state->insts[i] &&
                            state->insts[i]->cond ==
                            qpu_cond_complement(inst->cond))
                                state->channels |= 1 << i;
                        else
                                state->insts[i] = inst;
                }
        }

        if (state->channels == 0xf)
                BITSET_SET(block->def, var);
}
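
For reference, the per-destination tracking state consulted above looks roughly like the following. This is a sketch inferred from how the fields are used in this function, not a verbatim copy of the definition that accompanies get_partial_update_state().

/* One pending conditional/partial write per pack channel, plus the set of
 * channels already known to be fully defined within the block.
 */
struct partial_update_state {
        struct qinst *insts[4];
        uint8_t channels;
};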