int sys::ExecuteAndWait(StringRef Program, const char **Args, const char **Envp, ArrayRef<Optional<StringRef>> Redirects, unsigned SecondsToWait, unsigned MemoryLimit, std::string *ErrMsg, bool *ExecutionFailed) { assert(Redirects.empty() || Redirects.size() == 3); ProcessInfo PI; if (Execute(PI, Program, Args, Envp, Redirects, MemoryLimit, ErrMsg)) { if (ExecutionFailed) *ExecutionFailed = false; ProcessInfo Result = Wait( PI, SecondsToWait, /*WaitUntilTerminates=*/SecondsToWait == 0, ErrMsg); return Result.ReturnCode; } if (ExecutionFailed) *ExecutionFailed = true; return -1; }
// Instrument \p F according to the configured coverage type. Returns true if
// any instrumentation was inserted.
bool SanitizerCoverageModule::InjectCoverage(Function &F,
                                             ArrayRef<BasicBlock *> AllBlocks) {
  // Nothing to instrument.
  if (AllBlocks.empty())
    return false;
  switch (Options.CoverageType) {
  case SanitizerCoverageOptions::SCK_None:
    return false;
  case SanitizerCoverageOptions::SCK_Function:
    // Function-level coverage: a single guard, placed in the entry block only.
    CreateFunctionGuardArray(1, F);
    InjectCoverageAtBlock(F, F.getEntryBlock(), 0, false);
    return true;
  default: {
    // Block-level coverage: one guard per block. Large functions fall back to
    // out-of-line calls instead of inline instrumentation.
    size_t NumBlocks = AllBlocks.size();
    bool UseCalls = ClCoverageBlockThreshold < NumBlocks;
    CreateFunctionGuardArray(NumBlocks, F);
    for (size_t Idx = 0; Idx != NumBlocks; ++Idx)
      InjectCoverageAtBlock(F, *AllBlocks[Idx], Idx, UseCalls);
    return true;
  }
  }
}
/// Prints out the given RuntimeFunction struct for x64, assuming that Obj is /// pointing to an object file. Unlike executable, fields in RuntimeFunction /// struct are filled with zeros, but instead there are relocations pointing to /// them so that the linker will fill targets' RVAs to the fields at link /// time. This function interprets the relocations to find the data to be used /// in the resulting executable. static void printRuntimeFunctionRels(const COFFObjectFile *Obj, const RuntimeFunction &RF, uint64_t SectionOffset, const std::vector<RelocationRef> &Rels) { outs() << "Function Table:\n"; outs() << " Start Address: "; printCOFFSymbolAddress(outs(), Rels, SectionOffset + /*offsetof(RuntimeFunction, StartAddress)*/ 0, RF.StartAddress); outs() << "\n"; outs() << " End Address: "; printCOFFSymbolAddress(outs(), Rels, SectionOffset + /*offsetof(RuntimeFunction, EndAddress)*/ 4, RF.EndAddress); outs() << "\n"; outs() << " Unwind Info Address: "; printCOFFSymbolAddress(outs(), Rels, SectionOffset + /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8, RF.UnwindInfoOffset); outs() << "\n"; ArrayRef<uint8_t> XContents; uint64_t UnwindInfoOffset = 0; error(getSectionContents( Obj, Rels, SectionOffset + /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8, XContents, UnwindInfoOffset)); if (XContents.empty()) return; UnwindInfoOffset += RF.UnwindInfoOffset; if (UnwindInfoOffset > XContents.size()) return; auto *UI = reinterpret_cast<const Win64EH::UnwindInfo *>(XContents.data() + UnwindInfoOffset); printWin64EHUnwindInfo(UI); }
/// Build an OutputFilesComputer from the parsed frontend arguments.
/// Returns None (after emitting a diagnostic) when the output-file arguments
/// cannot be read or are inconsistent with the inputs.
Optional<OutputFilesComputer>
OutputFilesComputer::create(const llvm::opt::ArgList &args,
                            DiagnosticEngine &diags,
                            const FrontendInputsAndOutputs &inputsAndOutputs) {
  Optional<std::vector<std::string>> outputArguments =
      getOutputFilenamesFromCommandLineOrFilelist(args, diags);
  if (!outputArguments)
    return None;
  // A single output argument that names an existing directory means "place
  // every output in this directory", not "this is the one output file".
  const StringRef outputDirectoryArgument =
      outputArguments->size() == 1 &&
              llvm::sys::fs::is_directory(outputArguments->front())
          ? StringRef(outputArguments->front())
          : StringRef();
  // When a directory was given, there are no explicit per-input output files.
  ArrayRef<std::string> outputFileArguments =
      outputDirectoryArgument.empty() ? ArrayRef<std::string>(*outputArguments)
                                      : ArrayRef<std::string>();
  const StringRef firstInput =
      inputsAndOutputs.hasSingleInput()
          ? StringRef(inputsAndOutputs.getFilenameOfFirstInput())
          : StringRef();
  const FrontendOptions::ActionType requestedAction =
      ArgsToFrontendOptionsConverter::determineRequestedAction(args);
  // Explicit output files must pair up one-for-one with the inputs that
  // produce a main output.
  if (!outputFileArguments.empty() &&
      outputFileArguments.size() !=
          inputsAndOutputs.countOfInputsProducingMainOutputs()) {
    diags.diagnose(
        SourceLoc(),
        diag::error_if_any_output_files_are_specified_they_all_must_be);
    return None;
  }
  const file_types::ID outputType =
      FrontendOptions::formatForPrincipalOutputFileForAction(requestedAction);
  return OutputFilesComputer(
      diags, inputsAndOutputs, std::move(outputFileArguments),
      outputDirectoryArgument, firstInput, requestedAction,
      args.getLastArg(options::OPT_module_name),
      file_types::getExtension(outputType),
      FrontendOptions::doesActionProduceTextualOutput(requestedAction));
}
// On every indirect call we call a run-time function // __sanitizer_cov_indir_call* with two parameters: // - callee address, // - global cache array that contains kCacheSize pointers (zero-initialized). // The cache is used to speed up recording the caller-callee pairs. // The address of the caller is passed implicitly via caller PC. // kCacheSize is encoded in the name of the run-time function. void SanitizerCoverageModule::InjectCoverageForIndirectCalls( Function &F, ArrayRef<Instruction *> IndirCalls) { if (IndirCalls.empty()) return; const int kCacheSize = 16; const int kCacheAlignment = 64; // Align for better performance. Type *Ty = ArrayType::get(IntptrTy, kCacheSize); for (auto I : IndirCalls) { IRBuilder<> IRB(I); CallSite CS(I); Value *Callee = CS.getCalledValue(); if (isa<InlineAsm>(Callee)) continue; GlobalVariable *CalleeCache = new GlobalVariable( *F.getParent(), Ty, false, GlobalValue::PrivateLinkage, Constant::getNullValue(Ty), "__sancov_gen_callee_cache"); CalleeCache->setAlignment(kCacheAlignment); IRB.CreateCall(SanCovIndirCallFunction, {IRB.CreatePointerCast(Callee, IntptrTy), IRB.CreatePointerCast(CalleeCache, IntptrTy)}); } }
unsigned BPFInstrInfo::insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const { assert(!BytesAdded && "code size not handled"); // Shouldn't be a fall through. assert(TBB && "insertBranch must not be told to insert a fallthrough"); if (Cond.empty()) { // Unconditional branch assert(!FBB && "Unconditional branch with multiple successors!"); BuildMI(&MBB, DL, get(BPF::JMP)).addMBB(TBB); return 1; } llvm_unreachable("Unexpected conditional branch"); }
// Look up the profile counters for FuncName whose structural hash matches
// FuncHash, copying them into Counts on success.
std::error_code IndexedInstrProfReader::getFunctionCounts(
    StringRef FuncName, uint64_t FuncHash, std::vector<uint64_t> &Counts) {
  auto Iter = Index->find(FuncName);
  if (Iter == Index->end())
    return error(instrprof_error::unknown_function);
  // The name was found; now locate the record with the matching hash.
  ArrayRef<InstrProfRecord> Data = (*Iter);
  if (Data.empty())
    return error(instrprof_error::malformed);
  for (const InstrProfRecord &Record : Data) {
    if (Record.Hash == FuncHash) {
      Counts = Record.Counts;
      return success();
    }
  }
  return error(instrprof_error::hash_mismatch);
}
// Emit a ".globalvar" directive describing every global, comma-separated.
// Nothing is emitted for an empty list.
void WebAssemblyTargetAsmStreamer::emitGlobal(
    ArrayRef<wasm::Global> Globals) {
  if (Globals.empty())
    return;
  OS << "\t.globalvar \t";
  bool NeedComma = false;
  for (const wasm::Global &G : Globals) {
    if (NeedComma)
      OS << ", ";
    NeedComma = true;
    OS << WebAssembly::TypeToString(G.Type);
    // Imported globals print module:name; others print their initial value.
    if (G.InitialModule.empty())
      OS << '=' << G.InitialValue;
    else
      OS << '=' << G.InitialModule << ':' << G.InitialName;
  }
  OS << '\n';
}
// Return the byte offset at which instance data begins: the offset of the
// first non-empty element, Size(0) if that element has no fixed offset, or
// the total size when every element is empty.
Size ClassLayout::getInstanceStart() const {
  for (ElementLayout element : AllElements) {
    // Empty elements contribute nothing to the instance start.
    if (element.isEmpty())
      continue;
    // FIXME: assumes layout is always sequential!
    if (element.hasByteOffset())
      return element.getByteOffset();
    return Size(0);
  }
  // If there are no non-empty elements, just return the computed size.
  return getSize();
}
// For a forwarding instruction, we loop over all operands and make sure that // all non-trivial values have the same ownership. ValueOwnershipKind ValueOwnershipKindVisitor::visitForwardingInst(SILInstruction *I) { ArrayRef<Operand> Ops = I->getAllOperands(); // A forwarding inst without operands must be trivial. if (Ops.empty()) return ValueOwnershipKind::Trivial; // Find the first index where we have a trivial value. auto Iter = find_if(Ops, [](const Operand &Op) -> bool { return Op.get().getOwnershipKind() != ValueOwnershipKind::Trivial; }); // All trivial. if (Iter == Ops.end()) { return ValueOwnershipKind::Trivial; } // See if we have any Any. If we do, just return that for now. if (any_of(Ops, [](const Operand &Op) -> bool { return Op.get().getOwnershipKind() == ValueOwnershipKind::Any; })) return ValueOwnershipKind::Any; unsigned Index = std::distance(Ops.begin(), Iter); ValueOwnershipKind Base = Ops[Index].get().getOwnershipKind(); for (const Operand &Op : Ops.slice(Index+1)) { auto OpKind = Op.get().getOwnershipKind(); if (OpKind.merge(ValueOwnershipKind::Trivial)) continue; auto MergedValue = Base.merge(OpKind.Value); if (!MergedValue.hasValue()) { llvm_unreachable("Forwarding inst with mismatching ownership kinds?!"); } } return Base; }
std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) { assert(id < num_intrinsics && "Invalid intrinsic ID!"); static const char * const Table[] = { "not_intrinsic", #define GET_INTRINSIC_NAME_TABLE #include "llvm/Intrinsics.gen" #undef GET_INTRINSIC_NAME_TABLE }; if (Tys.empty()) return Table[id]; std::string Result(Table[id]); for (unsigned i = 0; i < Tys.size(); ++i) { if (PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) { Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) + EVT::getEVT(PTyp->getElementType()).getEVTString(); } else if (Tys[i]) Result += "." + EVT::getEVT(Tys[i]).getEVTString(); } return Result; }
// Return true if any captured diagnostic inside the given source range has
// one of the given IDs (an empty ID list matches any diagnostic).
bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs,
                                     SourceRange range) const {
  if (range.isInvalid())
    return false;
  for (const auto &Diag : List) {
    FullSourceLoc diagLoc = Diag.getLocation();
    // Filter by ID first; an empty list means any diagnostic qualifies.
    if (!IDs.empty() &&
        std::find(IDs.begin(), IDs.end(), Diag.getID()) == IDs.end())
      continue;
    // The location must fall within [range.begin, range.end].
    if (diagLoc.isBeforeInTranslationUnitThan(range.getBegin()))
      continue;
    if (diagLoc == range.getEnd() ||
        diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))
      return true;
  }
  return false;
}
/// Determine if the given invocation should run as a subcommand. /// /// \param ExecName The name of the argv[0] we were invoked as. /// \param SubcommandName On success, the full name of the subcommand to invoke. /// \param Args On return, the adjusted program arguments to use. /// \returns True if running as a subcommand. static bool shouldRunAsSubcommand(StringRef ExecName, SmallString<256> &SubcommandName, const ArrayRef<const char *> Args, bool &isRepl) { assert(!Args.empty()); // If we are not run as 'swift', don't do anything special. This doesn't work // with symlinks with alternate names, but we can't detect 'swift' vs 'swiftc' // if we try and resolve using the actual executable path. if (ExecName != "swift") return false; // If there are no program arguments, always invoke as normal. if (Args.size() == 1) return false; // Otherwise, we have a program argument. If it looks like an option or a // path, then invoke in interactive mode with the arguments as given. StringRef FirstArg(Args[1]); if (FirstArg.startswith("-") || FirstArg.find('.') != StringRef::npos || FirstArg.find('/') != StringRef::npos) return false; // Otherwise, we should have some sort of subcommand. Get the subcommand name // and remove it from the program arguments. StringRef Subcommand = Args[1]; // If the subcommand is the "built-in" 'repl', then use the // normal driver. if (Subcommand == "repl") { isRepl = true; return false; } // Form the subcommand name. SubcommandName.assign("swift-"); SubcommandName.append(Subcommand); return true; }
/// Map a symbol's declaring headers to the header users should #include.
/// Resolution order: per-symbol mapping, then full-path mapping, then
/// path-suffix mapping; falls back to the declaring header itself.
StringRef CanonicalIncludes::mapHeader(ArrayRef<std::string> Headers,
                                       StringRef QualifiedName) const {
  assert(!Headers.empty());
  // A symbol-level mapping takes precedence over all header-based logic.
  auto SE = SymbolMapping.find(QualifiedName);
  if (SE != SymbolMapping.end())
    return SE->second;
  // Find the first header such that the extension is not '.inc', and isn't a
  // recognized non-header file
  auto I = llvm::find_if(Headers, [](StringRef Include) {
    // Skip .inc file whose including header file should
    // be #included instead.
    return !Include.endswith(".inc");
  });
  if (I == Headers.end())
    return Headers[0]; // Fallback to the declaring header.
  StringRef Header = *I;
  // If Header is not expected to be included (e.g. .cc file), we fall back to
  // the declaring header.
  StringRef Ext = sys::path::extension(Header).trim('.');
  // Include-able headers must have precompile type. Treat files with
  // non-recognized extensions (TY_INVALID) as headers.
  auto ExtType = driver::types::lookupTypeForExtension(Ext);
  if ((ExtType != driver::types::TY_INVALID) &&
      !driver::types::onlyPrecompileType(ExtType))
    return Headers[0];
  // Exact full-path mappings beat suffix mappings.
  auto MapIt = FullPathMapping.find(Header);
  if (MapIt != FullPathMapping.end())
    return MapIt->second;
  // Try progressively longer trailing-path suffixes of Header, up to
  // MaxSuffixComponents path components.
  int Components = 1;
  for (auto It = sys::path::rbegin(Header), End = sys::path::rend(Header);
       It != End && Components <= MaxSuffixComponents; ++It, ++Components) {
    auto SubPath = Header.substr(It->data() - Header.begin());
    auto MappingIt = SuffixHeaderMapping.find(SubPath);
    if (MappingIt != SuffixHeaderMapping.end())
      return MappingIt->second;
  }
  // No mapping applies; the declaring header is the canonical one.
  return Header;
}
// Insert an (optionally conditional) branch sequence at the end of MBB,
// returning the number of instructions inserted.
unsigned WebAssemblyInstrInfo::InsertBranch(MachineBasicBlock &MBB,
                                            MachineBasicBlock *TBB,
                                            MachineBasicBlock *FBB,
                                            ArrayRef<MachineOperand> Cond,
                                            DebugLoc DL) const {
  assert(Cond.size() <= 1);
  unsigned Count = 0;
  if (Cond.empty()) {
    // Unconditional: emit a single BR when a target exists.
    if (TBB) {
      BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(TBB);
      ++Count;
    }
    return Count;
  }
  // Conditional branch to TBB, plus an unconditional tail to FBB if present.
  BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addOperand(Cond[0]).addMBB(TBB);
  ++Count;
  if (FBB) {
    BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(FBB);
    ++Count;
  }
  return Count;
}
/// Recursively walk into the given formal index type, expanding tuples, /// in order to form the arguments to a subscript accessor. static void translateIndices(SILGenFunction &gen, SILLocation loc, AbstractionPattern pattern, CanType formalType, ArrayRef<ManagedValue> &sourceIndices, RValue &result) { // Expand if the pattern was a tuple. if (pattern.isTuple()) { auto formalTupleType = cast<TupleType>(formalType); for (auto i : indices(formalTupleType.getElementTypes())) { translateIndices(gen, loc, pattern.getTupleElementType(i), formalTupleType.getElementType(i), sourceIndices, result); } return; } assert(!sourceIndices.empty() && "ran out of elements in index!"); ManagedValue value = sourceIndices.front(); sourceIndices = sourceIndices.slice(1); // We're going to build an RValue here, so make sure we translate // indirect arguments to be scalar if we have a loadable type. if (value.getType().isAddress()) { auto &valueTL = gen.getTypeLowering(value.getType()); if (!valueTL.isAddressOnly()) { value = gen.emitLoad(loc, value.forward(gen), valueTL, SGFContext(), IsTake); } } // Reabstract the subscripts from the requirement pattern to the // formal type. value = gen.emitOrigToSubstValue(loc, value, pattern, formalType); // Invoking the accessor will expect a value of the formal type, so // don't reabstract to that here. // Add that to the result, further expanding if necessary. result.addElement(gen, value, formalType, loc); }
/// Produce a SILValue referencing the foreign function being thunked.
/// Depending on the callee this is a witness_method (ObjC protocol
/// requirements), a class_method (imported ObjC methods), or a plain
/// function_ref.
static SILValue getThunkedForeignFunctionRef(SILGenFunction &gen,
                                             SILLocation loc,
                                             SILDeclRef foreign,
                                             ArrayRef<ManagedValue> args,
                                             ArrayRef<Substitution> subs,
                                             const SILConstantInfo &foreignCI) {
  assert(!foreign.isCurried
         && "should not thunk calling convention when curried");

  // Produce a witness_method when thunking ObjC protocol methods.
  auto dc = foreign.getDecl()->getDeclContext();
  if (isa<ProtocolDecl>(dc) && cast<ProtocolDecl>(dc)->isObjC()) {
    // ObjC protocol requirements carry exactly one substitution: Self.
    assert(subs.size() == 1);
    auto thisType = subs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    // 'self' is always the last argument.
    SILValue thisArg = args.back().getValue();
    SILValue OpenedExistential;
    // An opened-existential archetype means the existential value itself is
    // the dynamic Self for the call.
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;
    auto conformance = ProtocolConformanceRef(cast<ProtocolDecl>(dc));
    return gen.B.createWitnessMethod(loc, thisType, conformance, foreign,
                                     foreignCI.getSILType(),
                                     OpenedExistential);

  // Produce a class_method when thunking imported ObjC methods.
  } else if (foreignCI.SILFnType->getRepresentation()
               == SILFunctionTypeRepresentation::ObjCMethod) {
    // Imported ObjC methods are never generic here.
    assert(subs.empty());
    SILValue thisArg = args.back().getValue();

    return gen.B.createClassMethod(loc, thisArg, foreign,
                         SILType::getPrimitiveObjectType(foreignCI.SILFnType),
                                   /*volatile*/ true);
  }
  // Otherwise, emit a function_ref.
  return gen.emitGlobalFunctionRef(loc, foreign);
}
/// Gather the subscript index values in \p sourceIndices into an RValue whose
/// abstraction pattern is that of the requirement's indices.
RValue MaterializeForSetEmitter::
collectIndicesFromParameters(SILGenFunction &gen, SILLocation loc,
                             ArrayRef<ManagedValue> sourceIndices) {
  auto *witnessSub = cast<SubscriptDecl>(WitnessStorage);
  CanType witnessIndices =
      witnessSub->getIndicesInterfaceType()->getCanonicalType();
  CanType substIndices = getSubstWitnessInterfaceType(witnessIndices);

  auto *reqSub = cast<SubscriptDecl>(RequirementStorage);
  auto reqPattern = SGM.Types.getIndicesAbstractionPattern(reqSub);

  RValue indices(reqPattern, substIndices);

  // Translate and reabstract the index values by recursively walking
  // the abstracted index type.
  translateIndices(gen, loc, reqPattern, substIndices, sourceIndices, indices);
  assert(sourceIndices.empty() && "index value not claimed!");
  return indices;
}
void Dumper::printRuntimeFunction(const Context &Ctx, const coff_section *Section, uint64_t SectionOffset, const RuntimeFunction &RF) { DictScope RFS(SW, "RuntimeFunction"); printRuntimeFunctionEntry(Ctx, Section, SectionOffset, RF); const coff_section *XData; uint64_t Offset; resolveRelocation(Ctx, Section, SectionOffset + 8, XData, Offset); ArrayRef<uint8_t> Contents; error(Ctx.COFF.getSectionContents(XData, Contents)); if (Contents.empty()) return; Offset = Offset + RF.UnwindInfoOffset; if (Offset > Contents.size()) return; const auto UI = reinterpret_cast<const UnwindInfo*>(Contents.data() + Offset); printUnwindInfo(Ctx, XData, Offset, *UI); }
// Render a labeled byte buffer, either inline ("Label: Str (bytes)") or as a
// multi-line hex+ASCII block. Buffers longer than 16 bytes always use block
// form regardless of the caller's request.
void ScopedPrinter::printBinaryImpl(StringRef Label, StringRef Str,
                                    ArrayRef<uint8_t> Data, bool Block,
                                    uint32_t StartOffset) {
  if (Data.size() > 16)
    Block = true;

  if (!Block) {
    // Short buffers: a single line of hex bytes.
    startLine() << Label << ":";
    if (!Str.empty())
      OS << " " << Str;
    OS << " (" << format_bytes(Data, None, Data.size(), 1, 0, true) << ")\n";
    return;
  }

  // Block form: parenthesized hex dump with an ASCII column.
  startLine() << Label;
  if (!Str.empty())
    OS << ": " << Str;
  OS << " (\n";
  if (!Data.empty())
    OS << format_bytes_with_ascii(Data, StartOffset, 16, 4,
                                  (IndentLevel + 1) * 2, true)
       << "\n";
  startLine() << ")\n";
}
// Build a G_SEQUENCE packing the registers in Ops into Res at the byte
// offsets given by Indices.
MachineInstrBuilder MachineIRBuilder::buildSequence(unsigned Res,
                                                    ArrayRef<unsigned> Ops,
                                                    ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
         "sequence offsets must be in ascending order");
  assert(MRI->getType(Res).isValid() && "invalid operand type");
  for (unsigned Op : Ops)
    assert(MRI->getType(Op).isValid() && "invalid operand type");
#endif

  MachineInstrBuilder Seq = buildInstr(TargetOpcode::G_SEQUENCE);
  Seq.addDef(Res);
  // Operands come in (register, offset) pairs.
  for (unsigned Idx = 0, NumOps = Ops.size(); Idx != NumOps; ++Idx) {
    Seq.addUse(Ops[Idx]);
    Seq.addImm(Indices[Idx]);
  }
  return Seq;
}
unsigned NVPTXInstrInfo::InsertBranch( MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond, DebugLoc DL) const { // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); assert((Cond.size() == 1 || Cond.size() == 0) && "NVPTX branch conditions have two components!"); // One-way branch. if (!FBB) { if (Cond.empty()) // Unconditional branch BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(TBB); else // Conditional branch BuildMI(&MBB, DL, get(NVPTX::CBranch)).addReg(Cond[0].getReg()) .addMBB(TBB); return 1; } // Two-way Conditional Branch. BuildMI(&MBB, DL, get(NVPTX::CBranch)).addReg(Cond[0].getReg()).addMBB(TBB); BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(FBB); return 2; }
/*! \brief Build a sorted list of \c numGpuTasks GPU IDs drawn from
 * \c compatibleGpus, wrapping around and reusing IDs when there are more
 * tasks than compatible GPUs.
 *
 * \param compatibleGpus  IDs of GPUs that can run tasks; must be non-empty
 *                        whenever \c numGpuTasks > 0.
 * \param numGpuTasks     Number of GPU tasks to assign.
 * \returns               Sorted vector of \c numGpuTasks GPU IDs.
 */
std::vector<int> makeGpuIds(ArrayRef<const int> compatibleGpus, size_t numGpuTasks)
{
    // The precondition is loop-invariant; check it once, before the first
    // dereference of currentGpuId, instead of re-evaluating it per iteration.
    GMX_ASSERT(numGpuTasks == 0 || !compatibleGpus.empty(),
               "Must have compatible GPUs from which to build a list of GPU IDs to use");
    std::vector<int> gpuIdsToUse;
    gpuIdsToUse.reserve(numGpuTasks);
    auto currentGpuId = compatibleGpus.begin();
    for (size_t i = 0; i != numGpuTasks; ++i)
    {
        gpuIdsToUse.push_back(*currentGpuId);
        ++currentGpuId;
        if (currentGpuId == compatibleGpus.end())
        {
            // Wrap around and assign tasks again.
            currentGpuId = compatibleGpus.begin();
        }
    }
    std::sort(gpuIdsToUse.begin(), gpuIdsToUse.end());
    return gpuIdsToUse;
}
// Create the underlying D3D11 cube-map texture, optionally uploading the
// provided initial data (one ElementInitData per face, array slice, and mip).
void D3D11TextureCube::CreateHWResource(ArrayRef<ElementInitData> init_data, float4 const * clear_value_hint)
{
	// The hint is unused by D3D11 texture creation.
	KFL_UNUSED(clear_value_hint);

	D3D11_TEXTURE2D_DESC desc;
	desc.Width = width_;
	// Cube maps are square: height equals width.
	desc.Height = width_;
	desc.MipLevels = num_mip_maps_;
	// Six faces per cube, times the cube-array size.
	desc.ArraySize = 6 * array_size_;
	desc.Format = D3D11Mapping::MappingFormat(format_);
	desc.SampleDesc.Count = 1;
	desc.SampleDesc.Quality = 0;
	this->GetD3DFlags(desc.Usage, desc.BindFlags, desc.CPUAccessFlags, desc.MiscFlags);
	desc.MiscFlags |= D3D11_RESOURCE_MISC_TEXTURECUBE;

	std::vector<D3D11_SUBRESOURCE_DATA> subres_data;
	if (!init_data.empty())
	{
		// One subresource per face/slice/mip, in D3D subresource order.
		BOOST_ASSERT(init_data.size() == 6 * array_size_ * num_mip_maps_);
		subres_data.resize(init_data.size());
		for (size_t i = 0; i < init_data.size(); ++ i)
		{
			subres_data[i].pSysMem = init_data[i].data;
			subres_data[i].SysMemPitch = init_data[i].row_pitch;
			subres_data[i].SysMemSlicePitch = init_data[i].slice_pitch;
		}
	}

	// NOTE(review): with no init data, subres_data is empty and data() is
	// null, which D3D treats as "no initial data" — confirm this is intended.
	ID3D11Texture2D* d3d_tex;
	TIFHR(d3d_device_->CreateTexture2D(&desc, subres_data.data(), &d3d_tex));
	d3d_texture_ = MakeCOMPtr(d3d_tex);

	// Pre-create the SRV needed for GPU reads / mip generation.
	if ((access_hint_ & (EAH_GPU_Read | EAH_Generate_Mips)) && (num_mip_maps_ > 1))
	{
		this->RetriveD3DShaderResourceView(0, array_size_, 0, num_mip_maps_);
	}
}
// This function is just the same as convertUTF16ToUTF8String, but adapted to UTF32, since it did not exist in clang bool convertUTF32ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out) { assert(Out.empty()); // Error out on an uneven byte count. if (SrcBytes.size() % 4) return false; // Avoid OOB by returning early on empty input. if (SrcBytes.empty()) return true; const UTF32 *Src = reinterpret_cast<const UTF32 *>(SrcBytes.begin()); const UTF32 *SrcEnd = reinterpret_cast<const UTF32 *>(SrcBytes.end()); // Byteswap if necessary. // Ignore any potential BOM: We won't have the here... // Just allocate enough space up front. We'll shrink it later. Allocate // enough that we can fit a null terminator without reallocating. Out.resize(SrcBytes.size() * UNI_MAX_UTF8_BYTES_PER_CODE_POINT + 1); UTF8 *Dst = reinterpret_cast<UTF8 *>(&Out[0]); UTF8 *DstEnd = Dst + Out.size(); ConversionResult CR = ConvertUTF32toUTF8(&Src, SrcEnd, &Dst, DstEnd, strictConversion); assert(CR != targetExhausted); if (CR != conversionOK) { Out.clear(); return false; } Out.resize(reinterpret_cast<char *>(Dst)-&Out[0]); Out.push_back(0); Out.pop_back(); return true; }
// Build the unmodified AsmString used by the IR. Also build the individual // asm instruction(s) and place them in the AsmStrings vector; these are fed // to the AsmParser. static std::string buildMSAsmString(Sema &SemaRef, ArrayRef<Token> AsmToks, std::vector<std::string> &AsmStrings, std::vector<std::pair<unsigned,unsigned> > &AsmTokRanges) { assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); SmallString<512> Res; SmallString<512> Asm; unsigned startTok = 0; for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) { bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() || AsmToks[i].is(tok::kw_asm); if (isNewAsm) { if (i) { AsmStrings.push_back(Asm.str()); AsmTokRanges.push_back(std::make_pair(startTok, i-1)); startTok = i; Res += Asm; Asm.clear(); Res += '\n'; } if (AsmToks[i].is(tok::kw_asm)) { i++; // Skip __asm assert (i != e && "Expected another token"); } } if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm) Asm += ' '; Asm += getSpelling(SemaRef, AsmToks[i]); } AsmStrings.push_back(Asm.str()); AsmTokRanges.push_back(std::make_pair(startTok, AsmToks.size()-1)); Res += Asm; return Res.str(); }
/// Print the short-form @available() attribute for an array of long-form /// AvailableAttrs that can be represented in the short form. /// For example, for: /// @available(OSX, introduced: 10.10) /// @available(iOS, introduced: 8.0) /// this will print: /// @available(OSX 10.10, iOS 8.0, *) static void printShortFormAvailable(ArrayRef<const DeclAttribute *> Attrs, ASTPrinter &Printer, const PrintOptions &Options) { assert(!Attrs.empty()); Printer << "@available("; auto FirstAvail = cast<AvailableAttr>(Attrs.front()); if (Attrs.size() == 1 && FirstAvail->isLanguageVersionSpecific()) { assert(FirstAvail->Introduced.hasValue()); Printer << "swift " << FirstAvail->Introduced.getValue().getAsString() << ")"; } else { for (auto *DA : Attrs) { auto *AvailAttr = cast<AvailableAttr>(DA); assert(AvailAttr->Introduced.hasValue()); Printer << platformString(AvailAttr->Platform) << " " << AvailAttr->Introduced.getValue().getAsString() << ", "; } Printer << "*)"; } Printer.printNewline(); }
/// Load coverage data from the given object files and an indexed profile,
/// combining all readers into a single CoverageMapping.
Expected<std::unique_ptr<CoverageMapping>>
CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames,
                      StringRef ProfileFilename, ArrayRef<StringRef> Arches) {
  auto ProfileReaderOrErr = IndexedInstrProfReader::create(ProfileFilename);
  if (Error E = ProfileReaderOrErr.takeError())
    return std::move(E);
  auto ProfileReader = std::move(ProfileReaderOrErr.get());
  SmallVector<std::unique_ptr<CoverageMappingReader>, 4> Readers;
  // The buffers must outlive the readers that reference them.
  SmallVector<std::unique_ptr<MemoryBuffer>, 4> Buffers;
  for (const auto &File : llvm::enumerate(ObjectFilenames)) {
    auto CovMappingBufOrErr = MemoryBuffer::getFileOrSTDIN(File.value());
    if (std::error_code EC = CovMappingBufOrErr.getError())
      return errorCodeToError(EC);
    // NOTE(review): a non-empty Arches is indexed in lockstep with
    // ObjectFilenames; callers presumably guarantee equal sizes — confirm.
    StringRef Arch = Arches.empty() ? StringRef() : Arches[File.index()];
    auto CoverageReaderOrErr =
        BinaryCoverageReader::create(CovMappingBufOrErr.get(), Arch);
    if (Error E = CoverageReaderOrErr.takeError())
      return std::move(E);
    Readers.push_back(std::move(CoverageReaderOrErr.get()));
    Buffers.push_back(std::move(CovMappingBufOrErr.get()));
  }
  return load(Readers, *ProfileReader);
}
// Build the inline assembly string. Returns true on error. static bool buildMSAsmString(Sema &SemaRef, SourceLocation AsmLoc, ArrayRef<Token> AsmToks, SmallVectorImpl<unsigned> &TokOffsets, std::string &AsmString) { assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); SmallString<512> Asm; for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) { bool isNewAsm = ((i == 0) || AsmToks[i].isAtStartOfLine() || AsmToks[i].is(tok::kw_asm)); if (isNewAsm) { if (i != 0) Asm += "\n\t"; if (AsmToks[i].is(tok::kw_asm)) { i++; // Skip __asm if (i == e) { SemaRef.Diag(AsmLoc, diag::err_asm_empty); return true; } } } if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm) Asm += ' '; StringRef Spelling = getSpelling(SemaRef, AsmToks[i]); Asm += Spelling; TokOffsets.push_back(Asm.size()); } AsmString = Asm.str(); return false; }
// At each release point, release the reaching values that have been stored to // this address. // // The caller has already determined that all Stores are to the same element // within an otherwise dead object. static void insertReleases(ArrayRef<StoreInst*> Stores, ArrayRef<SILInstruction*> ReleasePoints, SILSSAUpdater &SSAUp) { assert(!Stores.empty()); SILValue StVal = Stores.front()->getSrc(); SSAUp.Initialize(StVal->getType()); for (auto *Store : Stores) SSAUp.AddAvailableValue(Store->getParent(), Store->getSrc()); for (auto *RelPoint : ReleasePoints) { SILBuilder B(RelPoint); // This does not use the SSAUpdater::RewriteUse API because it does not do // the right thing for local uses. We have already ensured a single store // per block, and all release points occur after all stores. Therefore we // can simply ask SSAUpdater for the reaching store. SILValue RelVal = SSAUp.GetValueAtEndOfBlock(RelPoint->getParent()); if (StVal->getType().isReferenceCounted(RelPoint->getModule())) B.createStrongRelease(RelPoint->getLoc(), RelVal)->getOperandRef(); else B.createReleaseValue(RelPoint->getLoc(), RelVal)->getOperandRef(); } }