std::vector<std::unique_ptr<ReductionOpportunity>>
ConditionalBranchToSimpleConditionalBranchOpportunityFinder::
    GetAvailableOpportunities(IRContext* context) const {
  std::vector<std::unique_ptr<ReductionOpportunity>> result;

  // Find the opportunities for redirecting all false targets before the
  // opportunities for redirecting all true targets: for a given branch, each
  // kind of opportunity disables the other, and the reducer is more efficient
  // when contiguous opportunities in its list do not disable one another.
  for (bool redirect_to_true : {true, false}) {
    // Consider every function.
    for (auto& function : *context->module()) {
      // Consider every block in the function.
      for (auto& block : function) {
        // The terminator must be SpvOpBranchConditional.
        Instruction* terminator = block.terminator();
        if (terminator->opcode() != SpvOpBranchConditional) {
          continue;
        }

        uint32_t true_block_id =
            terminator->GetSingleWordInOperand(kTrueBranchOperandIndex);
        uint32_t false_block_id =
            terminator->GetSingleWordInOperand(kFalseBranchOperandIndex);

        // The conditional branch must not already be simplified.
        if (true_block_id == false_block_id) {
          continue;
        }

        // The redirected target must not be a back-edge to a structured loop
        // header.
        uint32_t redirected_block_id =
            redirect_to_true ? false_block_id : true_block_id;
        uint32_t containing_loop_header =
            context->GetStructuredCFGAnalysis()->ContainingLoop(block.id());
        // The structured CFG analysis does not include a loop header as part
        // of the loop construct, but we want to include it, so handle this
        // special case:
        if (block.GetLoopMergeInst() != nullptr) {
          containing_loop_header = block.id();
        }
        if (redirected_block_id == containing_loop_header) {
          continue;
        }

        result.push_back(
            MakeUnique<
                ConditionalBranchToSimpleConditionalBranchReductionOpportunity>(
                block.terminator(), redirect_to_true));
      }
    }
  }
  return result;
}
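// Illustrative sketch (standalone; hypothetical toy types, not the
// SPIRV-Tools API): why the finder lists all opportunities of one kind before
// the other. For a single conditional branch, redirecting the false target to
// the true target makes the branch "simplified" and disables the opposite
// redirection, so grouping by kind keeps mutually-disabling opportunities
// from sitting next to each other in the result list.
#include <vector>

struct ToyBranch {
  unsigned true_id;
  unsigned false_id;
  bool Simplified() const { return true_id == false_id; }
};

// Mirrors the loop above: the first pass redirects false targets to the true
// target; the second pass then finds every branch already simplified and has
// nothing left to disable.
void ApplyGrouped(std::vector<ToyBranch>& branches) {
  for (bool redirect_to_true : {true, false}) {
    for (auto& branch : branches) {
      if (branch.Simplified()) continue;  // Opportunity was disabled; skip.
      if (redirect_to_true) {
        branch.false_id = branch.true_id;
      } else {
        branch.true_id = branch.false_id;
      }
    }
  }
}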
bool AggressiveDCEPass::IsStructuredHeader(BasicBlock* bp,
                                           Instruction** mergeInst,
                                           Instruction** branchInst,
                                           uint32_t* mergeBlockId) {
  if (!bp) return false;
  Instruction* mi = bp->GetMergeInst();
  if (mi == nullptr) return false;
  Instruction* bri = &*bp->tail();
  if (branchInst != nullptr) *branchInst = bri;
  if (mergeInst != nullptr) *mergeInst = mi;
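  // For both OpSelectionMerge and OpLoopMerge, in-operand 0 is the merge
  // block id.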
  if (mergeBlockId != nullptr) *mergeBlockId = mi->GetSingleWordInOperand(0);
  return true;
}
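// Illustrative usage sketch (hypothetical caller, not from the pass): each
// out-parameter of IsStructuredHeader may be nullptr when the caller does not
// need it, e.g. to query only the merge block id of a header:
//
//   uint32_t merge_block_id = 0;
//   if (IsStructuredHeader(block, nullptr, nullptr, &merge_block_id)) {
//     // `block` is a structured header; merge_block_id is its merge block.
//   }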
void LocalAccessChainConvertPass::FindTargetVars(Function* func) {
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      switch (ii->opcode()) {
        case SpvOpStore:
        case SpvOpLoad: {
          uint32_t varId;
          Instruction* ptrInst = GetPtr(&*ii, &varId);
          if (!IsTargetVar(varId)) break;
          const SpvOp op = ptrInst->opcode();
          // Rule out variables with unsupported refs, e.g. function calls
          if (!HasOnlySupportedRefs(varId)) {
            seen_non_target_vars_.insert(varId);
            seen_target_vars_.erase(varId);
            break;
          }
          // Rule out variables with nested access chains
          // TODO(): Convert nested access chains
          if (IsNonPtrAccessChain(op) && ptrInst->GetSingleWordInOperand(
                                             kAccessChainPtrIdInIdx) != varId) {
            seen_non_target_vars_.insert(varId);
            seen_target_vars_.erase(varId);
            break;
          }
          // Rule out variables accessed with non-constant indices
          if (!IsConstantIndexAccessChain(ptrInst)) {
            seen_non_target_vars_.insert(varId);
            seen_target_vars_.erase(varId);
            break;
          }
        } break;
        default:
          break;
      }
    }
  }
}
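// Illustrative sketch (standalone; hypothetical names): the two-set pattern
// used above. A variable is tentatively a target until some reference rules
// it out; ruling it out moves it from the target set to the non-target set so
// that it is never promoted again.
#include <cstdint>
#include <unordered_set>

class VarClassifier {
 public:
  bool IsTarget(uint32_t var_id) const { return targets_.count(var_id) != 0; }
  void Observe(uint32_t var_id) {
    // First sighting: tentatively a target, unless already ruled out.
    if (non_targets_.count(var_id) == 0) targets_.insert(var_id);
  }
  void RuleOut(uint32_t var_id) {
    // Mirrors seen_non_target_vars_.insert / seen_target_vars_.erase above.
    non_targets_.insert(var_id);
    targets_.erase(var_id);
  }

 private:
  std::unordered_set<uint32_t> targets_;
  std::unordered_set<uint32_t> non_targets_;
};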
bool AggressiveDCEPass::AggressiveDCE(Function* func) {
  // Mark function parameters as live.
  AddToWorklist(&func->DefInst());
  func->ForEachParam(
      [this](const Instruction* param) {
        AddToWorklist(const_cast<Instruction*>(param));
      },
      false);

  // Compute map from block to controlling conditional branch
  std::list<BasicBlock*> structuredOrder;
  cfg()->ComputeStructuredOrder(func, &*func->begin(), &structuredOrder);
  ComputeBlock2HeaderMaps(structuredOrder);
  bool modified = false;
  // Add instructions with external side effects to worklist. Also add branches
  // EXCEPT those immediately contained in an "if" selection construct or a loop
  // or continue construct.
  // TODO(greg-lunarg): Handle Frexp, Modf more optimally
  call_in_func_ = false;
  func_is_entry_point_ = false;
  private_stores_.clear();
  // Stacks to keep track of when we are inside an if- or loop-construct.
  // When immediately inside an if- or loop-construct, we do not initially
  // mark branches live. All other branches must be marked live.
  std::stack<bool> assume_branches_live;
  std::stack<uint32_t> currentMergeBlockId;
  // Push sentinel values on stack for when outside of any control flow.
  assume_branches_live.push(true);
  currentMergeBlockId.push(0);
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end(); ++bi) {
    // If exiting if or loop, update stacks
    if ((*bi)->id() == currentMergeBlockId.top()) {
      assume_branches_live.pop();
      currentMergeBlockId.pop();
    }
    for (auto ii = (*bi)->begin(); ii != (*bi)->end(); ++ii) {
      SpvOp op = ii->opcode();
      switch (op) {
        case SpvOpStore: {
          uint32_t varId;
          (void)GetPtr(&*ii, &varId);
          // Mark stores as live if their variable is not function scope
          // and is not private scope. Remember private stores for possible
          // later inclusion.  We cannot call IsLocalVar at this point because
          // private_like_local_ has not been set yet.
          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
            private_stores_.push_back(&*ii);
          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
            AddToWorklist(&*ii);
        } break;
        case SpvOpCopyMemory:
        case SpvOpCopyMemorySized: {
          uint32_t varId;
          (void)GetPtr(ii->GetSingleWordInOperand(kCopyMemoryTargetAddrInIdx),
                       &varId);
          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
            private_stores_.push_back(&*ii);
          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
            AddToWorklist(&*ii);
        } break;
        case SpvOpLoopMerge: {
          assume_branches_live.push(false);
          currentMergeBlockId.push(
              ii->GetSingleWordInOperand(kLoopMergeMergeBlockIdInIdx));
        } break;
        case SpvOpSelectionMerge: {
          assume_branches_live.push(false);
          currentMergeBlockId.push(
              ii->GetSingleWordInOperand(kSelectionMergeMergeBlockIdInIdx));
        } break;
        case SpvOpSwitch:
        case SpvOpBranch:
        case SpvOpBranchConditional:
        case SpvOpUnreachable: {
          if (assume_branches_live.top()) {
            AddToWorklist(&*ii);
          }
        } break;
        default: {
          // Function calls, atomics, function params, function returns, etc.
          // TODO(greg-lunarg): function calls live only if write to non-local
          if (!ii->IsOpcodeSafeToDelete()) {
            AddToWorklist(&*ii);
          }
          // Remember function calls
          if (op == SpvOpFunctionCall) call_in_func_ = true;
        } break;
      }
    }
  }
  // See if current function is an entry point
  for (auto& ei : get_module()->entry_points()) {
    if (ei.GetSingleWordInOperand(kEntryPointFunctionIdInIdx) ==
        func->result_id()) {
      func_is_entry_point_ = true;
      break;
    }
  }
  // If the current function is an entry point and has no function calls,
  // we can optimize private variables as locals
  private_like_local_ = func_is_entry_point_ && !call_in_func_;
  // If privates are not like locals, add their stores to the worklist
  if (!private_like_local_)
    for (auto& ps : private_stores_) AddToWorklist(ps);
  // Perform closure on live instruction set.
  while (!worklist_.empty()) {
    Instruction* liveInst = worklist_.front();
    // Add all operand instructions if not already live
    liveInst->ForEachInId([&liveInst, this](const uint32_t* iid) {
      Instruction* inInst = get_def_use_mgr()->GetDef(*iid);
      // Do not add label if an operand of a branch. This is not needed
      // as part of live code discovery and can create false live code,
      // for example, the branch to a header of a loop.
      if (inInst->opcode() == SpvOpLabel && liveInst->IsBranch()) return;
      AddToWorklist(inInst);
    });
    if (liveInst->type_id() != 0) {
      AddToWorklist(get_def_use_mgr()->GetDef(liveInst->type_id()));
    }
    // If in a structured if or loop construct, add the controlling
    // conditional branch and its merge.
    BasicBlock* blk = context()->get_instr_block(liveInst);
    Instruction* branchInst = block2headerBranch_[blk];
    if (branchInst != nullptr) {
      AddToWorklist(branchInst);
      Instruction* mergeInst = branch2merge_[branchInst];
      AddToWorklist(mergeInst);
    }
    // If the block is a header, add the next outermost controlling
    // conditional branch and its merge.
    Instruction* nextBranchInst = header2nextHeaderBranch_[blk];
    if (nextBranchInst != nullptr) {
      AddToWorklist(nextBranchInst);
      Instruction* mergeInst = branch2merge_[nextBranchInst];
      AddToWorklist(mergeInst);
    }
    // If local load, add all variable's stores if variable not already live
    if (liveInst->opcode() == SpvOpLoad || liveInst->IsAtomicWithLoad()) {
      uint32_t varId;
      (void)GetPtr(liveInst, &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
      // Process memory copies like loads
    } else if (liveInst->opcode() == SpvOpCopyMemory ||
               liveInst->opcode() == SpvOpCopyMemorySized) {
      uint32_t varId;
      (void)GetPtr(liveInst->GetSingleWordInOperand(kCopyMemorySourceAddrInIdx),
                   &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
      // If merge, add other branches that are part of its control structure
    } else if (liveInst->opcode() == SpvOpLoopMerge ||
               liveInst->opcode() == SpvOpSelectionMerge) {
      AddBreaksAndContinuesToWorklist(liveInst);
      // If function call, treat as if it loads from all pointer arguments
    } else if (liveInst->opcode() == SpvOpFunctionCall) {
      liveInst->ForEachInId([this](const uint32_t* iid) {
        // Skip non-ptr args
        if (!IsPtr(*iid)) return;
        uint32_t varId;
        (void)GetPtr(*iid, &varId);
        ProcessLoad(varId);
      });
      // If function parameter, treat as if its result id is loaded from
    } else if (liveInst->opcode() == SpvOpFunctionParameter) {
      ProcessLoad(liveInst->result_id());
      // We treat an OpImageTexelPointer as a load of the pointer, and
      // that value is manipulated to get the result.
    } else if (liveInst->opcode() == SpvOpImageTexelPointer) {
      uint32_t varId;
      (void)GetPtr(liveInst, &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
    }
    worklist_.pop();
  }

  // Kill dead instructions and remember dead blocks
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end();) {
    uint32_t mergeBlockId = 0;
    (*bi)->ForEachInst([this, &modified, &mergeBlockId](Instruction* inst) {
      if (!IsDead(inst)) return;
      if (inst->opcode() == SpvOpLabel) return;
      // If dead instruction is selection merge, remember merge block
      // for new branch at end of block
      if (inst->opcode() == SpvOpSelectionMerge ||
          inst->opcode() == SpvOpLoopMerge)
        mergeBlockId = inst->GetSingleWordInOperand(0);
      to_kill_.push_back(inst);
      modified = true;
    });
    // If a structured if or loop was deleted, add a branch to its merge
    // block, and traverse to the merge block and continue processing there.
    // We know the block still exists because the label is not deleted.
    if (mergeBlockId != 0) {
      AddBranch(mergeBlockId, *bi);
      for (++bi; (*bi)->id() != mergeBlockId; ++bi) {
      }

      auto merge_terminator = (*bi)->terminator();
      if (merge_terminator->opcode() == SpvOpUnreachable) {
        // The merge was unreachable. This is undefined behaviour so just
        // return (or return an undef). Then mark the new return as live.
        auto func_ret_type_inst = get_def_use_mgr()->GetDef(func->type_id());
        if (func_ret_type_inst->opcode() == SpvOpTypeVoid) {
          merge_terminator->SetOpcode(SpvOpReturn);
        } else {
          // Find an undef for the return value and make sure it gets kept by
          // the pass.
          auto undef_id = Type2Undef(func->type_id());
          auto undef = get_def_use_mgr()->GetDef(undef_id);
          live_insts_.Set(undef->unique_id());
          merge_terminator->SetOpcode(SpvOpReturnValue);
          merge_terminator->SetInOperands({{SPV_OPERAND_TYPE_ID, {undef_id}}});
          get_def_use_mgr()->AnalyzeInstUse(merge_terminator);
        }
        live_insts_.Set(merge_terminator->unique_id());
      }
    } else {
      ++bi;
    }
  }

  return modified;
}
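// Illustrative sketch (standalone; hypothetical toy IR, not the SPIRV-Tools
// API): the worklist closure at the heart of AggressiveDCE above. Seed the
// worklist with root instructions (those with side effects), then repeatedly
// mark the operands of live instructions until a fixpoint; everything left
// unmarked is dead and can be killed.
#include <cstddef>
#include <queue>
#include <unordered_set>
#include <vector>

struct Inst {
  std::vector<size_t> operands;  // Indices of defining instructions.
  bool has_side_effects = false;
};

std::unordered_set<size_t> ComputeLive(const std::vector<Inst>& insts) {
  std::unordered_set<size_t> live;
  std::queue<size_t> worklist;
  auto mark = [&](size_t i) {
    if (live.insert(i).second) worklist.push(i);  // AddToWorklist analogue.
  };
  // Roots: instructions with external side effects.
  for (size_t i = 0; i < insts.size(); ++i)
    if (insts[i].has_side_effects) mark(i);
  // Closure: a live instruction keeps its operands live (ForEachInId above).
  while (!worklist.empty()) {
    size_t i = worklist.front();
    worklist.pop();
    for (size_t op : insts[i].operands) mark(op);
  }
  return live;
}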
void AggressiveDCEPass::AddBreaksAndContinuesToWorklist(
    Instruction* mergeInst) {
  assert(mergeInst->opcode() == SpvOpSelectionMerge ||
         mergeInst->opcode() == SpvOpLoopMerge);

  BasicBlock* header = context()->get_instr_block(mergeInst);
  uint32_t headerIndex = structured_order_index_[header];
  const uint32_t mergeId = mergeInst->GetSingleWordInOperand(0);
  BasicBlock* merge = context()->get_instr_block(mergeId);
  uint32_t mergeIndex = structured_order_index_[merge];
  get_def_use_mgr()->ForEachUser(
      mergeId, [headerIndex, mergeIndex, this](Instruction* user) {
        if (!user->IsBranch()) return;
        BasicBlock* block = context()->get_instr_block(user);
        uint32_t index = structured_order_index_[block];
        if (headerIndex < index && index < mergeIndex) {
          // This is a break from the loop.
          AddToWorklist(user);
          // Add branch's merge if there is one.
          Instruction* userMerge = branch2merge_[user];
          if (userMerge != nullptr) AddToWorklist(userMerge);
        }
      });

  if (mergeInst->opcode() != SpvOpLoopMerge) {
    return;
  }

  // For loops we need to find the continues as well.
  const uint32_t contId =
      mergeInst->GetSingleWordInOperand(kLoopMergeContinueBlockIdInIdx);
  get_def_use_mgr()->ForEachUser(contId, [&contId, this](Instruction* user) {
    SpvOp op = user->opcode();
    if (op == SpvOpBranchConditional || op == SpvOpSwitch) {
      // A conditional branch or switch can only be a continue if it does not
      // have a merge instruction or its merge block is not the continue block.
      Instruction* hdrMerge = branch2merge_[user];
      if (hdrMerge != nullptr && hdrMerge->opcode() == SpvOpSelectionMerge) {
        uint32_t hdrMergeId =
            hdrMerge->GetSingleWordInOperand(kSelectionMergeMergeBlockIdInIdx);
        if (hdrMergeId == contId) return;
        // Need to mark merge instruction too
        AddToWorklist(hdrMerge);
      }
    } else if (op == SpvOpBranch) {
      // An unconditional branch can only be a continue if it is not
      // branching to its own merge block.
      BasicBlock* blk = context()->get_instr_block(user);
      Instruction* hdrBranch = block2headerBranch_[blk];
      if (hdrBranch == nullptr) return;
      Instruction* hdrMerge = branch2merge_[hdrBranch];
      if (hdrMerge->opcode() == SpvOpLoopMerge) return;
      uint32_t hdrMergeId =
          hdrMerge->GetSingleWordInOperand(kSelectionMergeMergeBlockIdInIdx);
      if (contId == hdrMergeId) return;
    } else {
      return;
    }
    AddToWorklist(user);
  });
}
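// Illustrative sketch (standalone; hypothetical helper): the structured-order
// test used above to classify a branch to the merge block as a "break". A
// branch from a block strictly between the header and the merge, in
// structured order, jumps out of the construct.
#include <cstdint>

bool IsBreakBranch(uint32_t branch_block_index, uint32_t header_index,
                   uint32_t merge_index) {
  // headerIndex < index && index < mergeIndex in the pass above.
  return header_index < branch_block_index && branch_block_index < merge_index;
}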
bool BlockMergePass::MergeBlocks(Function* func) {
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end();) {
    // Find a block whose single successor has no other predecessors.
    auto ii = bi->end();
    --ii;
    Instruction* br = &*ii;
    if (br->opcode() != SpvOpBranch) {
      ++bi;
      continue;
    }

    const uint32_t lab_id = br->GetSingleWordInOperand(0);
    if (cfg()->preds(lab_id).size() != 1) {
      ++bi;
      continue;
    }

    bool pred_is_merge = IsMerge(&*bi);
    bool succ_is_merge = IsMerge(lab_id);
    if (pred_is_merge && succ_is_merge) {
      // Cannot merge two merges together.
      ++bi;
      continue;
    }

    Instruction* merge_inst = bi->GetMergeInst();
    bool pred_is_header = IsHeader(&*bi);
    if (pred_is_header && lab_id != merge_inst->GetSingleWordInOperand(0u)) {
      bool succ_is_header = IsHeader(lab_id);
      if (pred_is_header && succ_is_header) {
        // Cannot merge two headers together when the successor is not the merge
        // block of the predecessor.
        ++bi;
        continue;
      }

      // If this is a header block and the successor is not its merge, we must
      // be careful about which blocks we are willing to merge together.
      // OpLoopMerge must be followed by a conditional or unconditional branch.
      // The merge must be a loop merge because a selection merge cannot be
      // followed by an unconditional branch.
      BasicBlock* succ_block = context()->get_instr_block(lab_id);
      SpvOp succ_term_op = succ_block->terminator()->opcode();
      assert(merge_inst->opcode() == SpvOpLoopMerge);
      if (succ_term_op != SpvOpBranch &&
          succ_term_op != SpvOpBranchConditional) {
        ++bi;
        continue;
      }
    }

    // Merge blocks.
    context()->KillInst(br);
    auto sbi = bi;
    for (; sbi != func->end(); ++sbi)
      if (sbi->id() == lab_id) break;
    // If bi is sbi's only predecessor, it dominates sbi and thus
    // sbi must follow bi in func's ordering.
    assert(sbi != func->end());

    // Update the inst-to-block mapping for the instructions in sbi.
    for (auto& inst : *sbi) {
      context()->set_instr_block(&inst, &*bi);
    }

    // Now actually move the instructions.
    bi->AddInstructions(&*sbi);

    if (merge_inst) {
      if (pred_is_header && lab_id == merge_inst->GetSingleWordInOperand(0u)) {
        // Merging the header and merge blocks, so remove the structured control
        // flow declaration.
        context()->KillInst(merge_inst);
      } else {
        // Move the merge instruction to just before the terminator.
        merge_inst->InsertBefore(bi->terminator());
      }
    }
    context()->ReplaceAllUsesWith(lab_id, bi->id());
    context()->KillInst(sbi->GetLabelInst());
    (void)sbi.Erase();
    // Reprocess block.
    modified = true;
  }
  return modified;
}
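// Illustrative sketch (standalone; hypothetical toy CFG, not the SPIRV-Tools
// API): the core merge condition above. A block ending in an unconditional
// branch can absorb its successor when that successor has exactly one
// predecessor; the real pass additionally rules out merging two merge blocks
// or two headers, which this toy model omits. The inner while mirrors
// "Reprocess block": the grown block may be mergeable again.
#include <cstddef>
#include <vector>

struct Block {
  int succ = -1;       // Single unconditional successor index, or -1.
  int num_preds = 0;   // Predecessor count, as from cfg()->preds() above.
  bool dead = false;
};

bool MergeChains(std::vector<Block>& blocks) {
  bool modified = false;
  for (size_t i = 0; i < blocks.size(); ++i) {
    while (!blocks[i].dead && blocks[i].succ >= 0 &&
           blocks[i].succ != static_cast<int>(i) &&  // Skip self-loops.
           blocks[blocks[i].succ].num_preds == 1) {
      Block& succ = blocks[blocks[i].succ];
      succ.dead = true;            // The successor's label goes away.
      blocks[i].succ = succ.succ;  // Predecessor inherits the terminator.
      modified = true;
    }
  }
  return modified;
}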