/// \brief Add reference counting operations equal to the effect of the call.
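///
/// As an illustrative sketch (names and types are placeholders, not taken
/// from a specific test case): for a unary call that takes its argument
/// @owned and returns its result @owned, roughly
///
///   %r = apply %f(%x) : $@convention(method) (@owned X) -> @owned X
///
/// the net reference-count effect on the value is modeled by the pair
///
///   retain_value %x : $X
///   release_value %x : $X
///
/// For a @guaranteed parameter, only the retain for the @owned result is
/// emitted.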
static void emitMatchingRCAdjustmentsForCall(ApplyInst *Call, SILValue OnX) {
  FunctionRefInst *FRI = cast<FunctionRefInst>(Call->getCallee());
  SILFunction *F = FRI->getReferencedFunction();
  auto FnTy = F->getLoweredFunctionType();
  assert(FnTy->getNumAllResults() == 1);
  auto ResultInfo = FnTy->getAllResults()[0];
  (void) ResultInfo;

  assert(ResultInfo.getConvention() == ResultConvention::Owned &&
         "Expect an @owned return");
  assert(Call->getNumArguments() == 1 && "Expect a unary call");

  // Emit a retain for the @owned return.
  SILBuilderWithScope Builder(Call);
  Builder.createRetainValue(Call->getLoc(), OnX, Atomicity::Atomic);

  // Emit a release for the @owned parameter, or none for a @guaranteed
  // parameter.
  auto Params = FnTy->getParameters();
  (void) Params;
  assert(Params.size() == 1 && "Expect one parameter");
  auto ParamInfo = FnTy->getParameters()[0].getConvention();
  assert(ParamInfo == ParameterConvention::Direct_Owned ||
         ParamInfo == ParameterConvention::Direct_Guaranteed);

  if (ParamInfo == ParameterConvention::Direct_Owned)
    Builder.createReleaseValue(Call->getLoc(), OnX, Atomicity::Atomic);
}
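/// \brief Return the convention of the self parameter of the callee of
/// \p SemanticsCall.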
static ParameterConvention
getSelfParameterConvention(ApplyInst *SemanticsCall) {
  FunctionRefInst *FRI = cast<FunctionRefInst>(SemanticsCall->getCallee());
  SILFunction *F = FRI->getReferencedFunction();
  auto FnTy = F->getLoweredFunctionType();

  return FnTy->getSelfParameter().getConvention();
}
/// \brief We only know how to simulate the reference counting effects of
/// unary function calls that take their argument @owned or @guaranteed and
/// return an
/// @owned value.
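///
/// Schematically, the accepted signatures look like (types are placeholders):
///
///   (@owned T) -> @owned U
///   (@guaranteed T) -> @owned U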
static bool knowHowToEmitReferenceCountInsts(ApplyInst *Call) {
  if (Call->getNumArguments() != 1)
    return false;

  FunctionRefInst *FRI = cast<FunctionRefInst>(Call->getCallee());
  SILFunction *F = FRI->getReferencedFunction();
  auto FnTy = F->getLoweredFunctionType();

  // Look at the result type.
  if (FnTy->getNumAllResults() != 1)
    return false;
  auto ResultInfo = FnTy->getAllResults()[0];
  if (ResultInfo.getConvention() != ResultConvention::Owned)
    return false;

  // Look at the parameter.
  auto Params = FnTy->getParameters();
  (void) Params;
  assert(Params.size() == 1 && "Expect one parameter");
  auto ParamConv = FnTy->getParameters()[0].getConvention();

  return ParamConv == ParameterConvention::Direct_Owned ||
         ParamConv == ParameterConvention::Direct_Guaranteed;
}
/// \brief Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that the callee function is statically
/// determinable from the SIL) or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
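///
/// As a rough sketch (not exact SIL; %box, %callee and $F are placeholders),
/// the load-based pattern this looks through is the one SILGen emits for a
/// closure captured in a box:
///
///   %box = alloc_box $F
///   store %callee to %box#1 : $*F
///   ...
///   %fn = load %box#1 : $*F
///   apply %fn(...)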
static SILFunction *
getCalleeFunction(FullApplySite AI, bool &IsThick,
                  SmallVectorImpl<SILValue>& CaptureArgs,
                  SmallVectorImpl<SILValue>& FullArgs,
                  PartialApplyInst *&PartialApply,
                  SILModule::LinkingMode Mode) {
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    if (!ABI)
      return nullptr;
    assert(LI->getOperand().getResultNumber() == 1);

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (static_cast<SILInstruction*>(I) == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest().getDef() == ABI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the alloc other than loads, strong_retains
        // and strong_releases
        for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE;
             ++UI)
          if (UI.getUser() != SI && !isa<LoadInst>(UI.getUser()) &&
              !isa<StrongRetainInst>(UI.getUser()) &&
              !isa<StrongReleaseInst>(UI.getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instructions, since those are the patterns
  // generated when using auto closures.
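  //
  // Schematically (placeholder names, not exact SIL), an autoclosure call
  // often looks like:
  //
  //   %f = function_ref @closure
  //   %thick = thin_to_thick_function %f
  //   apply %thick(...)
  //
  // and we want to resolve %thick back to the function_ref.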
  if (PartialApplyInst *PAI =
        dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    for (const auto &Arg : PAI->getArguments()) {
      CaptureArgs.push_back(Arg);
      FullArgs.push_back(Arg);
    }

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (ThinToThickFunctionInst *TTTFI =
               dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue);

  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;
    
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If CalleeFunction is a declaration, see if we can load it. If we fail to
  // load it, bail.
  if (CalleeFunction->empty()
      && !AI.getModule().linkFunction(CalleeFunction, Mode))
    return nullptr;
  return CalleeFunction;
}
/// \brief Make sure that all parameters are passed with a reference count
/// neutral parameter convention except for self.
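///
/// For example, for kCheckIndex the expected signature is, schematically,
///
///   (Int, @guaranteed Array) -> ...   or   (Int, @owned Array) -> ...
///
/// with a trivially typed index parameter; types here are placeholders.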
bool swift::ArraySemanticsCall::isValidSignature() {
  assert(SemanticsCall && getKind() != ArrayCallKind::kNone &&
         "Need an array semantic call");
  FunctionRefInst *FRI = cast<FunctionRefInst>(SemanticsCall->getCallee());
  SILFunction *F = FRI->getReferencedFunction();
  auto FnTy = F->getLoweredFunctionType();
  auto &Mod = F->getModule();

  // Check whether we have a valid signature for semantic calls that we hoist.
  switch (getKind()) {
  // All other calls can be considered valid.
  default: break;
  case ArrayCallKind::kArrayPropsIsNativeTypeChecked: {
    // @guaranteed/@owned Self
    if (SemanticsCall->getNumArguments() != 1)
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kCheckIndex: {
    // Int, @guaranteed/@owned Self
    if (SemanticsCall->getNumArguments() != 2 ||
        !SemanticsCall->getArgument(0)->getType().isTrivial(Mod))
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kCheckSubscript: {
    // Int, Bool, Self
    if (SemanticsCall->getNumArguments() != 3 ||
        !SemanticsCall->getArgument(0)->getType().isTrivial(Mod))
      return false;
    if (!SemanticsCall->getArgument(1)->getType().isTrivial(Mod))
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Direct_Guaranteed ||
           SelfConvention == ParameterConvention::Direct_Owned;
  }
  case ArrayCallKind::kMakeMutable: {
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Indirect_Inout;
  }
  case ArrayCallKind::kArrayUninitialized: {
    // Make sure that, if this is an _adoptStorage call, our storage is
    // uniquely referenced by us.
    SILValue Arg0 = SemanticsCall->getArgument(0);
    if (Arg0->getType().isExistentialType()) {
      auto *AllocBufferAI = dyn_cast<ApplyInst>(Arg0);
      if (!AllocBufferAI)
        return false;
      auto *AllocFn = AllocBufferAI->getReferencedFunction();
      if (!AllocFn || AllocFn->getName() != "swift_bufferAllocate" ||
          !hasOneNonDebugUse(AllocBufferAI))
        return false;
    }
    return true;
  }
  case ArrayCallKind::kWithUnsafeMutableBufferPointer: {
    SILFunctionConventions origConv(SemanticsCall->getOrigCalleeType(), Mod);
    if (origConv.getNumIndirectSILResults() != 1
        || SemanticsCall->getNumArguments() != 3)
      return false;
    auto SelfConvention = FnTy->getSelfParameter().getConvention();
    return SelfConvention == ParameterConvention::Indirect_Inout;
  }
  }

  return true;
}
/// Process an apply instruction which uses a partial_apply
/// as its callee.
/// Returns true on success.
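///
/// As a rough sketch of the rewrite (placeholder names, not exact SIL):
///
///   %pa = partial_apply %f(%c)   // captures %c
///   %r  = apply %pa(%a)
///
/// becomes
///
///   retain_value %c              // the callee consumes its captured args
///   %r = apply %f(%a, %c)
///   release_value %c             // only if %c is not consumed (@guaranteed)
///   strong_release %pa           // the original apply consumed %pa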
bool PartialApplyCombiner::processSingleApply(FullApplySite AI) {
  Builder.setInsertionPoint(AI.getInstruction());
  Builder.setCurrentDebugScope(AI.getDebugScope());

  // Prepare the args.
  SmallVector<SILValue, 8> Args;
  // First the ApplyInst args.
  for (auto Op : AI.getArguments())
    Args.push_back(Op);

  SILInstruction *InsertionPoint = &*Builder.getInsertionPoint();
  // Next, the partial apply args.

  // Pre-process partial_apply arguments only once, lazily.
  if (isFirstTime) {
    isFirstTime = false;
    if (!allocateTemporaries())
      return false;
  }

  // Now, copy over the partial apply args.
  for (auto Op : PAI->getArguments()) {
    auto Arg = Op;
    // If there is a new temporary for this argument, use it instead.
    if (isa<AllocStackInst>(Arg)) {
      if (ArgToTmp.count(Arg)) {
        Op = ArgToTmp.lookup(Arg);
      }
    }
    Args.push_back(Op);
  }

  Builder.setInsertionPoint(InsertionPoint);
  Builder.setCurrentDebugScope(AI.getDebugScope());

  // The thunk that implements the partial apply calls the closure function,
  // which expects all of its arguments to be consumed. However, the
  // captured arguments are not arguments of *this* apply, so they are not
  // pre-incremented. When we combine the partial_apply and this apply into
  // a new apply we need to retain all of the closure's non-address type
  // arguments.
  auto ParamInfo = PAI->getSubstCalleeType()->getParameters();
  auto PartialApplyArgs = PAI->getArguments();
  // Arguments that need to be released after each invocation.
  SmallVector<SILValue, 8> ToBeReleasedArgs;
  for (unsigned i = 0, e = PartialApplyArgs.size(); i < e; ++i) {
    SILValue Arg = PartialApplyArgs[i];
    if (!Arg->getType().isAddress()) {
      // Retain the argument as the callee may consume it.
      Builder.emitRetainValueOperation(PAI->getLoc(), Arg);
      // For non-consumed parameters (e.g. guaranteed), we also need to
      // insert releases after each apply instruction that we create.
      if (!ParamInfo[ParamInfo.size() - PartialApplyArgs.size() + i]
              .isConsumed())
        ToBeReleasedArgs.push_back(Arg);
    }
  }

  auto *F = FRI->getReferencedFunction();
  SILType FnType = F->getLoweredType();
  SILType ResultTy = F->getLoweredFunctionType()->getSILResult();
  ArrayRef<Substitution> Subs = PAI->getSubstitutions();
  if (!Subs.empty()) {
    FnType = FnType.substGenericArgs(PAI->getModule(), Subs);
    ResultTy = FnType.getAs<SILFunctionType>()->getSILResult();
  }

  FullApplySite NAI;
  if (auto *TAI = dyn_cast<TryApplyInst>(AI))
    NAI = Builder.createTryApply(AI.getLoc(), FRI, FnType, Subs, Args,
                                 TAI->getNormalBB(), TAI->getErrorBB());
  else
    NAI = Builder.createApply(AI.getLoc(), FRI, FnType, ResultTy, Subs, Args,
                              cast<ApplyInst>(AI)->isNonThrowing());

  // We also need to release the partial_apply instruction itself because it
  // is consumed by the apply instruction.
  if (auto *TAI = dyn_cast<TryApplyInst>(AI)) {
    Builder.setInsertionPoint(TAI->getNormalBB()->begin());
    for (auto Arg : ToBeReleasedArgs) {
      Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
    }
    Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
    Builder.setInsertionPoint(TAI->getErrorBB()->begin());
    // Release the non-consumed parameters.
    for (auto Arg : ToBeReleasedArgs) {
      Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
    }
    Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
    Builder.setInsertionPoint(AI.getInstruction());
  } else {
    // Release the non-consumed parameters.
    for (auto Arg : ToBeReleasedArgs) {
      Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
    }
    Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
  }

  SilCombiner->replaceInstUsesWith(*AI.getInstruction(), NAI.getInstruction());
  SilCombiner->eraseInstFromFunction(*AI.getInstruction());
  return true;
}