Example #1
static SILValue getNextUncurryLevelRef(SILGenFunction &gen,
                                       SILLocation loc,
                                       SILDeclRef next,
                                       bool direct,
                                       ArrayRef<SILValue> curriedArgs,
                                       ArrayRef<Substitution> curriedSubs) {
    // For a foreign function, reference the native thunk.
    if (next.isForeign)
        return gen.emitGlobalFunctionRef(loc, next.asForeign(false));

    // If the fully-uncurried reference is to a native dynamic class method, emit
    // the dynamic dispatch.
    auto fullyAppliedMethod = !next.isCurried && !next.isForeign && !direct &&
                              next.hasDecl();

    auto constantInfo = gen.SGM.Types.getConstantInfo(next);
    SILValue thisArg;
    if (!curriedArgs.empty())
        thisArg = curriedArgs.back();

    if (fullyAppliedMethod &&
            isa<AbstractFunctionDecl>(next.getDecl()) &&
            gen.getMethodDispatch(cast<AbstractFunctionDecl>(next.getDecl()))
            == MethodDispatch::Class) {
        SILValue thisArg = curriedArgs.back();

        // Use the dynamic thunk if dynamic.
        if (next.getDecl()->isDynamic()) {
            auto dynamicThunk = gen.SGM.getDynamicThunk(next, constantInfo);
            return gen.B.createFunctionRef(loc, dynamicThunk);
        }

        return gen.B.createClassMethod(loc, thisArg, next);
    }

    // If the fully-uncurried reference is to a generic method, look up the
    // witness.
    if (fullyAppliedMethod &&
            constantInfo.SILFnType->getRepresentation()
            == SILFunctionTypeRepresentation::WitnessMethod) {
        auto thisType = curriedSubs[0].getReplacement()->getCanonicalType();
        assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
        SILValue OpenedExistential;
        if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
            OpenedExistential = thisArg;
        return gen.B.createWitnessMethod(loc, thisType, nullptr, next,
                                         constantInfo.getSILType(),
                                         OpenedExistential);
    }

    // Otherwise, emit a direct call.
    return gen.emitGlobalFunctionRef(loc, next);
}
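
The reference above relies on the convention that the receiver is the final element of the curried argument list, which is why curriedArgs.back() yields self. A minimal, self-contained sketch of that convention follows; the Arg type and helper names are hypothetical, not part of the Swift sources.

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <string>
#include <vector>

struct Arg { std::string name; };

// Peel the receiver off the end of a curried argument list.
static const Arg &getSelfArgument(llvm::ArrayRef<Arg> curriedArgs) {
  assert(!curriedArgs.empty() && "curried call must carry a self argument");
  return curriedArgs.back();        // the last element is the receiver
}

// Everything before the receiver are the ordinary call arguments.
static llvm::ArrayRef<Arg> getCallArguments(llvm::ArrayRef<Arg> curriedArgs) {
  return curriedArgs.drop_back();   // same storage, one element shorter
}

int main() {
  std::vector<Arg> args = {{"x"}, {"y"}, {"self"}};
  assert(getSelfArgument(args).name == "self");
  assert(getCallArguments(args).size() == 2);
}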
Example #2
DIExpression *DIExpression::appendToStack(const DIExpression *Expr,
                                          ArrayRef<uint64_t> Ops) {
  assert(Expr && !Ops.empty() && "Can't append ops to this expression");
  assert(none_of(Ops,
                 [](uint64_t Op) {
                   return Op == dwarf::DW_OP_stack_value ||
                          Op == dwarf::DW_OP_LLVM_fragment;
                 }) &&
         "Can't append this op");

  // Append a DW_OP_deref after Expr's current op list if it's non-empty and
  // has no DW_OP_stack_value.
  //
  // Match .* DW_OP_stack_value (DW_OP_LLVM_fragment A B)?.
  Optional<FragmentInfo> FI = Expr->getFragmentInfo();
  unsigned DropUntilStackValue = FI.hasValue() ? 3 : 0;
  ArrayRef<uint64_t> ExprOpsBeforeFragment =
      Expr->getElements().drop_back(DropUntilStackValue);
  bool NeedsDeref = (Expr->getNumElements() > DropUntilStackValue) &&
                    (ExprOpsBeforeFragment.back() != dwarf::DW_OP_stack_value);
  bool NeedsStackValue = NeedsDeref || ExprOpsBeforeFragment.empty();

  // Append a DW_OP_deref after Expr's current op list if needed, then append
  // the new ops, and finally ensure that a single DW_OP_stack_value is present.
  SmallVector<uint64_t, 16> NewOps;
  if (NeedsDeref)
    NewOps.push_back(dwarf::DW_OP_deref);
  NewOps.append(Ops.begin(), Ops.end());
  if (NeedsStackValue)
    NewOps.push_back(dwarf::DW_OP_stack_value);
  return DIExpression::append(Expr, NewOps);
}
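
The ArrayRef work above pairs drop_back(DropUntilStackValue), which hides an optional three-element fragment suffix, with a back() check on whatever remains. A reduced sketch of that tail inspection, using a generic marker value instead of the real DWARF opcodes (names and constants here are illustrative only):

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <cstdint>
#include <vector>

static bool endsWithMarker(llvm::ArrayRef<uint64_t> Ops, bool HasFragment,
                           uint64_t Marker) {
  // A fragment, when present, is a fixed three-element suffix (opcode plus two
  // operands); hide it first, then inspect the last remaining element.
  llvm::ArrayRef<uint64_t> Body = HasFragment ? Ops.drop_back(3) : Ops;
  return !Body.empty() && Body.back() == Marker;
}

int main() {
  // {deref-like op, marker, fragment opcode, offset, size} -- values made up.
  std::vector<uint64_t> Ops = {0x10, 0x9f, 0x1000, 3, 8};
  assert(endsWithMarker(Ops, /*HasFragment=*/true, /*Marker=*/0x9f));
  assert(!endsWithMarker(Ops, /*HasFragment=*/false, /*Marker=*/0x9f));
}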
Example #3
static ManagedValue emitBuiltinAssign(SILGenFunction &gen,
                                      SILLocation loc,
                                      SubstitutionList substitutions,
                                      ArrayRef<ManagedValue> args,
                                      CanFunctionType formalApplyType,
                                      SGFContext C) {
  assert(args.size() >= 2 && "assign should have two arguments");
  assert(substitutions.size() == 1 &&
         "assign should have a single substitution");

  // The substitution determines the type of the thing we're destroying.
  CanType assignFormalType = substitutions[0].getReplacement()->getCanonicalType();
  SILType assignType = gen.getLoweredType(assignFormalType);
  
  // Convert the destination pointer argument to a SIL address.
  SILValue addr = gen.B.createPointerToAddress(loc,
                                               args.back().getUnmanagedValue(),
                                               assignType.getAddressType(),
                                               /*isStrict*/ true);
  
  // Build the value to be assigned, reconstructing tuples if needed.
  auto src = RValue::withPreExplodedElements(args.slice(0, args.size() - 1),
                                             assignFormalType);
  
  std::move(src).assignInto(gen, loc, addr);

  return ManagedValue::forUnmanaged(gen.emitEmptyTuple(loc));
}
Example #4
static ManagedValue emitBuiltinAssign(SILGenFunction &SGF,
                                      SILLocation loc,
                                      SubstitutionMap substitutions,
                                      ArrayRef<ManagedValue> args,
                                      SGFContext C) {
  assert(args.size() >= 2 && "assign should have two arguments");
  assert(substitutions.getReplacementTypes().size() == 1 &&
         "assign should have a single substitution");

  // The substitution determines the type of the thing we're destroying.
  CanType assignFormalType =
    substitutions.getReplacementTypes()[0]->getCanonicalType();
  SILType assignType = SGF.getLoweredType(assignFormalType);
  
  // Convert the destination pointer argument to a SIL address.
  SILValue addr = SGF.B.createPointerToAddress(loc,
                                               args.back().getUnmanagedValue(),
                                               assignType.getAddressType(),
                                               /*isStrict*/ true,
                                               /*isInvariant*/ false);
  
  // Build the value to be assigned, reconstructing tuples if needed.
  auto src = RValue(SGF, args.slice(0, args.size() - 1), assignFormalType);
  
  std::move(src).ensurePlusOne(SGF, loc).assignInto(SGF, loc, addr);

  return ManagedValue::forUnmanaged(SGF.emitEmptyTuple(loc));
}
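
Both versions of emitBuiltinAssign split the argument list the same way: the trailing element is the destination and everything before it is the value being assigned, hence args.back() alongside args.slice(0, args.size() - 1). A reduced sketch of that split, with plain ints standing in for SIL values (purely illustrative):

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <vector>

struct Assignment {
  llvm::ArrayRef<int> sources;  // args[0 .. n-2]
  int destination;              // args[n-1]
};

static Assignment splitAssignArgs(llvm::ArrayRef<int> args) {
  assert(args.size() >= 2 && "assign needs at least one source and a destination");
  return {args.slice(0, args.size() - 1), args.back()};
}

int main() {
  std::vector<int> args = {1, 2, 3, 99};
  Assignment a = splitAssignArgs(args);
  assert(a.sources.size() == 3 && a.destination == 99);
}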
Example #5
bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
  // Walk from the innermost module outward, consuming name components from the
  // back of the path: the innermost module must match the last component.
  for (const Module *M = this; M; M = M->Parent) {
    if (nameParts.empty() || M->Name != nameParts.back())
      return false;
    nameParts = nameParts.drop_back();
  }
  return nameParts.empty();
}
Example #6
static void reportModuleReferences(const Module *Mod,
                                   ArrayRef<SourceLocation> IdLocs,
                                   const ImportDecl *ImportD,
                                   IndexDataConsumer &DataConsumer) {
  if (!Mod)
    return;
  // Recurse on the parent module first so references are reported
  // outermost-first; each level consumes the trailing source location.
  reportModuleReferences(Mod->Parent, IdLocs.drop_back(), ImportD,
                         DataConsumer);
  DataConsumer.handleModuleOccurence(ImportD, Mod,
                                     (SymbolRoleSet)SymbolRole::Reference,
                                     IdLocs.back());
}
Example #7
bool SemaAnnotator::passModulePathElements(
    ArrayRef<ImportDecl::AccessPathElement> Path,
    const clang::Module *ClangMod) {

  if (Path.empty() || !ClangMod)
    return true;

  // Visit outer path components (and their parent modules) first, then report
  // the innermost element, which is the last entry in Path.
  if (!passModulePathElements(Path.drop_back(1), ClangMod->Parent))
    return false;

  return passReference(ClangMod, Path.back());
}
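
Examples #5 through #7 share one idiom: walk a parent chain while consuming path components from the back of an ArrayRef with back() and drop_back(). A self-contained distillation of that idiom, assuming a hypothetical Node type:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

struct Node {
  llvm::StringRef Name;
  const Node *Parent;
};

static bool fullNameIs(const Node *N, llvm::ArrayRef<llvm::StringRef> Parts) {
  // The innermost node corresponds to the *last* component, so compare against
  // Parts.back() and shrink the view with drop_back() while walking outward.
  for (; N; N = N->Parent) {
    if (Parts.empty() || N->Name != Parts.back())
      return false;
    Parts = Parts.drop_back();
  }
  return Parts.empty();  // every component must have been consumed
}

int main() {
  Node Top{"std", nullptr}, Mid{"chrono", &Top}, Leaf{"duration", &Mid};
  llvm::StringRef Full[] = {"std", "chrono", "duration"};
  assert(fullNameIs(&Leaf, Full));
  assert(!fullNameIs(&Leaf, llvm::ArrayRef<llvm::StringRef>(Full).drop_back()));
}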
Example #8
Expected<typename ELFT::DynRange> ELFFile<ELFT>::dynamicEntries() const {
  ArrayRef<Elf_Dyn> Dyn;
  size_t DynSecSize = 0;

  auto ProgramHeadersOrError = program_headers();
  if (!ProgramHeadersOrError)
    return ProgramHeadersOrError.takeError();

  for (const Elf_Phdr &Phdr : *ProgramHeadersOrError) {
    if (Phdr.p_type == ELF::PT_DYNAMIC) {
      Dyn = makeArrayRef(
          reinterpret_cast<const Elf_Dyn *>(base() + Phdr.p_offset),
          Phdr.p_filesz / sizeof(Elf_Dyn));
      DynSecSize = Phdr.p_filesz;
      break;
    }
  }

  // If we can't find the dynamic section in the program headers, we just fall
  // back on the sections.
  if (Dyn.empty()) {
    auto SectionsOrError = sections();
    if (!SectionsOrError)
      return SectionsOrError.takeError();

    for (const Elf_Shdr &Sec : *SectionsOrError) {
      if (Sec.sh_type == ELF::SHT_DYNAMIC) {
        Expected<ArrayRef<Elf_Dyn>> DynOrError =
            getSectionContentsAsArray<Elf_Dyn>(&Sec);
        if (!DynOrError)
          return DynOrError.takeError();
        Dyn = *DynOrError;
        DynSecSize = Sec.sh_size;
        break;
      }
    }

    if (!Dyn.data())
      return ArrayRef<Elf_Dyn>();
  }

  if (Dyn.empty())
    return createError("invalid empty dynamic section");

  if (DynSecSize % sizeof(Elf_Dyn) != 0)
    return createError("malformed dynamic section");

  if (Dyn.back().d_tag != ELF::DT_NULL)
    return createError("dynamic sections must be DT_NULL terminated");

  return Dyn;
}
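
Here back() is the final validation step: after locating the dynamic table, the code requires a non-empty, whole-entry-sized region whose last entry is DT_NULL. A sketch of that check with a hypothetical Entry type and a zero tag standing in for DT_NULL:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <cstdint>

struct Entry { uint64_t Tag; uint64_t Value; };
static constexpr uint64_t SentinelTag = 0;  // stands in for DT_NULL

static bool isWellFormed(llvm::ArrayRef<uint8_t> Raw) {
  if (Raw.size() % sizeof(Entry) != 0)
    return false;  // malformed: region does not hold a whole number of entries
  llvm::ArrayRef<Entry> Entries(reinterpret_cast<const Entry *>(Raw.data()),
                                Raw.size() / sizeof(Entry));
  return !Entries.empty() && Entries.back().Tag == SentinelTag;
}

int main() {
  Entry Table[] = {{21, 0}, {1, 42}, {SentinelTag, 0}};
  llvm::ArrayRef<uint8_t> Raw(reinterpret_cast<const uint8_t *>(Table),
                              sizeof(Table));
  assert(isWellFormed(Raw));
}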
Example #9
static SILValue
getThunkedForeignFunctionRef(SILGenFunction &gen,
                             SILLocation loc,
                             SILDeclRef foreign,
                             ArrayRef<ManagedValue> args,
                             ArrayRef<Substitution> subs,
                             const SILConstantInfo &foreignCI) {
  assert(!foreign.isCurried
         && "should not thunk calling convention when curried");

  // Produce a witness_method when thunking ObjC protocol methods.
  auto dc = foreign.getDecl()->getDeclContext();
  if (isa<ProtocolDecl>(dc) && cast<ProtocolDecl>(dc)->isObjC()) {
    assert(subs.size() == 1);
    auto thisType = subs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    SILValue thisArg = args.back().getValue();

    SILValue OpenedExistential;
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;
    auto conformance = ProtocolConformanceRef(cast<ProtocolDecl>(dc));
    return gen.B.createWitnessMethod(loc, thisType, conformance, foreign,
                                     foreignCI.getSILType(),
                                     OpenedExistential);

  // Produce a class_method when thunking imported ObjC methods.
  } else if (foreignCI.SILFnType->getRepresentation()
        == SILFunctionTypeRepresentation::ObjCMethod) {
    assert(subs.empty());
    SILValue thisArg = args.back().getValue();

    return gen.B.createClassMethod(loc, thisArg, foreign,
                         SILType::getPrimitiveObjectType(foreignCI.SILFnType),
                                   /*volatile*/ true);
  }
  // Otherwise, emit a function_ref.
  return gen.emitGlobalFunctionRef(loc, foreign);
}
Example #10
/// Emit an open-coded protocol-witness thunk for materializeForSet if
/// delegating to the standard implementation isn't good enough.
///
/// materializeForSet sometimes needs to be open-coded because of the
/// thin callback function, which is dependent but cannot be reabstracted.
///
/// - In a protocol extension, the callback doesn't know how to capture
///   or reconstruct the generic conformance information.
///
/// - The abstraction pattern of the variable from the witness may
///   differ from the abstraction pattern of the protocol, likely forcing
///   a completely different access pattern (e.g. to write back a
///   reabstracted value instead of modifying it in-place).
///
/// \return true if special code was emitted
bool SILGenFunction::
maybeEmitMaterializeForSetThunk(ProtocolConformance *conformance,
                                FuncDecl *requirement, FuncDecl *witness,
                                ArrayRef<Substitution> witnessSubs,
                                ArrayRef<ManagedValue> origParams) {
  // Break apart the parameters.  self comes last, the result buffer
  // comes first, the callback storage buffer comes second, and the
  // rest are indices.
  ManagedValue self = origParams.back();
  SILValue resultBuffer = origParams[0].getUnmanagedValue();
  SILValue callbackBuffer = origParams[1].getUnmanagedValue();
  ArrayRef<ManagedValue> indices = origParams.slice(2).drop_back();

  MaterializeForSetEmitter emitter(SGM, conformance, requirement, witness,
                                   witnessSubs, self.getType());

  if (!emitter.shouldOpenCode())
    return false;

  emitter.emit(*this, self, resultBuffer, callbackBuffer, indices);
  return true;
}
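
The parameter carving above (fixed slots at the front, self at the back, indices in between via origParams.slice(2).drop_back()) reduces to a small ArrayRef exercise. A sketch with a hypothetical Param type:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <string>
#include <vector>

struct Param { std::string name; };

struct SplitParams {
  const Param *result;            // first slot
  const Param *callback;          // second slot
  llvm::ArrayRef<Param> indices;  // everything in between
  const Param *self;              // last slot
};

static SplitParams split(llvm::ArrayRef<Param> params) {
  assert(params.size() >= 3 && "need result, callback and self at minimum");
  return {&params[0], &params[1], params.slice(2).drop_back(), &params.back()};
}

int main() {
  std::vector<Param> params = {{"result"}, {"callback"}, {"i"}, {"j"}, {"self"}};
  SplitParams s = split(params);
  assert(s.indices.size() == 2 && s.self->name == "self");
}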
Example #11
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded:  " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
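
The early bail-out compares Ops.front().first with Ops.back().first: assuming, as the caller arranges, that entries for one instruction are contiguous, matching ends imply the whole list refers to the same instruction. A reduced sketch of that check with a hypothetical key type:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <utility>
#include <vector>

using OpRef = std::pair<int /*instruction id*/, unsigned /*operand index*/>;

static bool allOnSameInstruction(llvm::ArrayRef<OpRef> Ops) {
  if (Ops.empty())
    return false;
  // Entries for one instruction are contiguous, so comparing the two ends
  // covers the whole list.
  return Ops.front().first == Ops.back().first;
}

int main() {
  std::vector<OpRef> Ops = {{7, 0}, {7, 2}, {7, 5}};
  assert(allOnSameInstruction(Ops));
  Ops.push_back({8, 1});
  assert(!allOnSameInstruction(Ops));
}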
Example #12
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
      MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
          LR->removeValNo(VNI);
      }
    }
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
       MII != End; ++MII)
    if (&*MII != FoldMI)
      LIS.InsertMachineInstrInMaps(&*MII);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
Example #13
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  auto TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() ||
      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
    return false;

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
      TRI->getCallPreservedMask(MF, CallConv));

  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : OrigArgs) {

    // TODO: handle not simple cases.
    if (OrigArg.Flags.isByVal())
      return false;

    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
                           }))
      return false;
  }
  // Do the actual argument marshalling.
  OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Handler.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.

  if (OrigRet.Reg) {
    SplitArgs.clear();
    SmallVector<unsigned, 8> NewRegs;

    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             NewRegs.assign(Regs.begin(), Regs.end());
                           }))
      return false;

    CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    if (!NewRegs.empty())
      MIRBuilder.buildMerge(OrigRet.Reg, NewRegs);
  }

  CallSeqStart.addImm(Handler.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
      .addImm(0 /* see getFrameAdjustment */);

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MIRBuilder.buildInstr(AdjStackUp)
      .addImm(Handler.getStackSize())
      .addImm(0 /* NumBytesForCalleeToPop */);

  return true;
}
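
The variadic-call decision above reads OrigArgs.back().IsFixed, but only after guarding against an empty argument list. A reduced, hypothetical version of that decision:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <vector>

struct ArgInfo { bool IsFixed; };

static bool needsVarArgLowering(llvm::ArrayRef<ArgInfo> Args, bool Is64Bit) {
  // A call is treated as variadic only when its trailing argument is not a
  // fixed parameter of the callee's prototype; an empty list counts as fixed.
  bool IsFixed = Args.empty() ? true : Args.back().IsFixed;
  return Is64Bit && !IsFixed;
}

int main() {
  std::vector<ArgInfo> Args = {{true}, {false}};  // trailing argument is variadic
  assert(needsVarArgLowering(Args, /*Is64Bit=*/true));
  assert(!needsVarArgLowering({}, /*Is64Bit=*/true));
}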
Example #14
bool GenericSignature::enumeratePairedRequirements(
               llvm::function_ref<bool(Type, ArrayRef<Requirement>)> fn) const {
  // We'll be walking through the list of requirements.
  ArrayRef<Requirement> reqs = getRequirements();
  unsigned curReqIdx = 0, numReqs = reqs.size();

  // ... and walking through the list of generic parameters.
  ArrayRef<GenericTypeParamType *> genericParams = getGenericParams();
  unsigned curGenericParamIdx = 0, numGenericParams = genericParams.size();

  // Figure out which generic parameters are complete.
  SmallVector<bool, 4> genericParamsAreConcrete(genericParams.size(), false);
  for (auto req : reqs) {
    if (req.getKind() != RequirementKind::SameType) continue;
    if (req.getSecondType()->isTypeParameter()) continue;

    auto gp = req.getFirstType()->getAs<GenericTypeParamType>();
    if (!gp) continue;

    unsigned index = GenericParamKey(gp).findIndexIn(genericParams);
    genericParamsAreConcrete[index] = true;
  }

  /// Local function to 'catch up' to the next dependent type we're going to
  /// visit, calling the function for each of the generic parameters in the
  /// generic parameter list prior to this parameter.
  auto enumerateGenericParamsUpToDependentType = [&](CanType depTy) -> bool {
    // Figure out where we should stop when enumerating generic parameters.
    unsigned stopDepth, stopIndex;
    if (auto gp = dyn_cast_or_null<GenericTypeParamType>(depTy)) {
      stopDepth = gp->getDepth();
      stopIndex = gp->getIndex();
    } else {
      stopDepth = genericParams.back()->getDepth() + 1;
      stopIndex = 0;
    }

    // Enumerate generic parameters up to the stopping point, calling the
    // callback function for each one
    while (curGenericParamIdx != numGenericParams) {
      auto curGenericParam = genericParams[curGenericParamIdx];

      // If the current generic parameter is before our stopping point, call
      // the function.
      if (curGenericParam->getDepth() < stopDepth ||
          (curGenericParam->getDepth() == stopDepth &&
           curGenericParam->getIndex() < stopIndex)) {
        if (!genericParamsAreConcrete[curGenericParamIdx] &&
            fn(curGenericParam, { }))
          return true;

        ++curGenericParamIdx;
        continue;
      }

      // If the current generic parameter is at our stopping point, we're
      // done.
      if (curGenericParam->getDepth() == stopDepth &&
          curGenericParam->getIndex() == stopIndex) {
        ++curGenericParamIdx;
        return false;
      }

      // Otherwise, there's nothing to do.
      break;
    }

    return false;
  };

  // Walk over all of the requirements.
  while (curReqIdx != numReqs) {
    // "Catch up" by enumerating generic parameters up to this dependent type.
    CanType depTy = reqs[curReqIdx].getFirstType()->getCanonicalType();
    if (enumerateGenericParamsUpToDependentType(depTy)) return true;

    // Utility to skip over non-conformance constraints that apply to this
    // type.
    auto skipNonConformanceConstraints = [&] {
      while (curReqIdx != numReqs &&
             reqs[curReqIdx].getKind() != RequirementKind::Conformance &&
             reqs[curReqIdx].getFirstType()->getCanonicalType() == depTy) {
        ++curReqIdx;
      }
    };

    // First, skip past any non-conformance constraints on this type.
    skipNonConformanceConstraints();

    // Collect all of the conformance constraints for this dependent type.
    unsigned startIdx = curReqIdx;
    unsigned endIdx = curReqIdx;
    while (curReqIdx != numReqs &&
           reqs[curReqIdx].getKind() == RequirementKind::Conformance &&
           reqs[curReqIdx].getFirstType()->getCanonicalType() == depTy) {
      ++curReqIdx;
      endIdx = curReqIdx;
    }

    // Skip any trailing non-conformance constraints.
    skipNonConformanceConstraints();

    // If there were any conformance constraints, or we have a generic
    // parameter we can't skip, invoke the callback.
    if ((startIdx != endIdx ||
         (isa<GenericTypeParamType>(depTy) &&
          !genericParamsAreConcrete[
            GenericParamKey(cast<GenericTypeParamType>(depTy))
              .findIndexIn(genericParams)])) &&
        fn(depTy, reqs.slice(startIdx, endIdx-startIdx)))
      return true;
  }

  // Catch up on any remaining generic parameters.
  return enumerateGenericParamsUpToDependentType(CanType());
}
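
The slice(startIdx, endIdx - startIdx) call hands each contiguous run of conformance requirements to the callback without copying. The run-slicing idiom on its own, with an illustrative key type and callback:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <utility>
#include <vector>

using Req = std::pair<int /*key*/, int /*payload*/>;

// Walk a list grouped by key and hand each contiguous run out as a slice.
template <typename Callback>
static void forEachRun(llvm::ArrayRef<Req> Reqs, Callback Fn) {
  unsigned Idx = 0, N = Reqs.size();
  while (Idx != N) {
    int Key = Reqs[Idx].first;
    unsigned StartIdx = Idx;
    while (Idx != N && Reqs[Idx].first == Key)
      ++Idx;
    Fn(Key, Reqs.slice(StartIdx, Idx - StartIdx));  // contiguous run, no copy
  }
}

int main() {
  std::vector<Req> Reqs = {{1, 10}, {1, 11}, {2, 20}, {3, 30}, {3, 31}};
  unsigned Runs = 0;
  forEachRun(Reqs, [&](int, llvm::ArrayRef<Req>) { ++Runs; });
  assert(Runs == 3);
}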
Example #15
Expr *getActualLastValue() const {
  return Values.back();
}
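
Accessors like getActualLastValue assume the underlying list is non-empty; ArrayRef::back() asserts in assertion-enabled builds and is undefined otherwise. A defensive variant, purely illustrative:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>

template <typename T>
static const T *getLastOrNull(llvm::ArrayRef<T> Values) {
  return Values.empty() ? nullptr : &Values.back();
}

int main() {
  int xs[] = {1, 2, 3};
  assert(*getLastOrNull<int>(xs) == 3);
  assert(getLastOrNull<int>({}) == nullptr);
}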
Example #16
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
                LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    MIBundleOperands::PhysRegInfo RI =
      MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (MO->readsReg()) {
      assert(RI.Reads && "Cannot fold physreg reader");
      continue;
    }
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LI->getVNInfoAt(Idx))
          LI->removeValNo(VNI);
      }
    }
  }
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction.  Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded:  " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}