llvm::error_code canonicalize(const llvm::Twine &path,
                              llvm::SmallVectorImpl<char> &result) {
  std::string p = path.str();
#ifdef PATH_MAX
  int path_max = PATH_MAX;
#else
  int path_max = pathconf(p.c_str(), _PC_PATH_MAX);
  if (path_max <= 0)
    path_max = 4096;
#endif
  result.resize(path_max);
  // realpath returns null on failure and leaves the buffer contents
  // undefined, so propagate errno instead of reporting success blindly.
  if (!realpath(p.c_str(), result.data()))
    return llvm::error_code(errno, llvm::generic_category());
  result.resize(strlen(result.data()));
  return llvm::error_code::success();
}
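// A minimal, hedged usage sketch for canonicalize() above; canonicalizeExample
// is a hypothetical caller. Old-style llvm::error_code converts to true on
// failure, so the negated check means success. SmallString is the usual
// SmallVectorImpl<char> caller buffer and provides c_str().
#include "llvm/ADT/SmallString.h"
#include <cstdio>

void canonicalizeExample() {
  llvm::SmallString<128> resolved;
  if (!canonicalize(".", resolved))
    std::printf("canonical path: %s\n", resolved.c_str());
}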
void UUID::toString(llvm::SmallVectorImpl<char> &out) const {
  out.resize(UUID::StringBufferSize);
  uuid_unparse_upper(Value, out.data());

  // Pop off the null terminator.
  assert(out.back() == '\0' && "did not null-terminate?!");
  out.pop_back();
}
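// Hedged caller sketch for UUID::toString() above; the UUID type and its
// StringBufferSize constant come from the snippet's own codebase, and
// printUUID is a hypothetical helper. After the pop_back() the buffer holds
// no trailing NUL, so it is printed as a StringRef rather than a C string.
#include "llvm/Support/raw_ostream.h"

void printUUID(const UUID &uuid) {
  llvm::SmallString<64> str;
  uuid.toString(str);
  llvm::errs() << str.str() << "\n";
}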
template <typename T, typename FetchDeclFn>
static void EraseUnwantedCUDAMatchesImpl(Sema &S, const FunctionDecl *Caller,
                                         llvm::SmallVectorImpl<T> &Matches,
                                         FetchDeclFn FetchDecl) {
  assert(S.getLangOpts().CUDATargetOverloads &&
         "Should not be called w/o enabled target overloads.");
  if (Matches.size() <= 1)
    return;

  // Find the best call preference among the functions in Matches.
  Sema::CUDAFunctionPreference P, BestCFP = Sema::CFP_Never;
  for (auto const &Match : Matches) {
    P = S.IdentifyCUDAPreference(Caller, FetchDecl(Match));
    if (P > BestCFP)
      BestCFP = P;
  }

  // Erase all functions with lower priority, compacting the vector by
  // overwriting each loser with the last live element.
  for (unsigned I = 0, N = Matches.size(); I != N;)
    if (S.IdentifyCUDAPreference(Caller, FetchDecl(Matches[I])) < BestCFP) {
      Matches[I] = Matches[--N];
      Matches.resize(N);
    } else {
      ++I;
    }
}
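// The erase loop above avoids per-element erase() calls by swapping losers
// with the last element and shrinking. A standalone, hedged sketch of the
// same swap-with-last pattern on plain ints (keepOnlyGreaterEqual is a
// hypothetical name); note it does not preserve element order.
#include "llvm/ADT/SmallVector.h"

static void keepOnlyGreaterEqual(llvm::SmallVectorImpl<int> &V, int Cutoff) {
  for (unsigned I = 0, N = V.size(); I != N;) {
    if (V[I] < Cutoff) {
      V[I] = V[--N];   // overwrite loser with the last live element
      V.resize(N);     // drop the now-duplicated tail slot
    } else {
      ++I;
    }
  }
}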
bool Popen(const std::string& Cmd, llvm::SmallVectorImpl<char>& Buf, bool RdE) {
  if (FILE *PF = ::popen(RdE ? (Cmd + " 2>&1").c_str() : Cmd.c_str(), "r")) {
    Buf.resize(0);
    // Read in chunks of the buffer's inline capacity; guard against a
    // zero-capacity buffer, which would otherwise loop forever on
    // zero-length freads.
    size_t Chunk = Buf.capacity_in_bytes();
    if (!Chunk)
      Chunk = 4096;
    while (true) {
      const size_t Len = Buf.size();
      Buf.resize(Len + Chunk);
      const size_t R = ::fread(&Buf[Len], sizeof(char), Chunk, PF);
      if (R < Chunk) {
        Buf.resize(Len + R);
        break;
      }
    }
    ::pclose(PF);
    return !Buf.empty();
  }
  return false;
}
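// Hedged usage sketch for Popen() above (popenExample is a hypothetical
// caller): with RdE = true, the child's stderr is folded into the captured
// output via the appended "2>&1".
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"

void popenExample() {
  llvm::SmallString<1024> out;
  if (Popen("ls /tmp", out, /*RdE=*/true))
    llvm::outs() << out.str();
}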
void ClassTemplateDecl::getPartialSpecializations(
    llvm::SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
  llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs =
      getPartialSpecializations();
  PS.clear();
  PS.resize(PartialSpecs.size());
  for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
           P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
       P != PEnd; ++P) {
    assert(!PS[P->getSequenceNumber()]);
    PS[P->getSequenceNumber()] = P->getMostRecentDeclaration();
  }
}
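// Hedged caller sketch (dumpPartialSpecs is hypothetical): because the
// function above indexes the output vector by sequence number, the result
// order is deterministic even though FoldingSet iteration order is not.
void dumpPartialSpecs(ClassTemplateDecl *CTD) {
  llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> specs;
  CTD->getPartialSpecializations(specs);
  for (unsigned i = 0, e = specs.size(); i != e; ++i)
    llvm::errs() << "partial specialization #" << i << "\n";
}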
/// getSpelling - This method is used to get the spelling of a token into a
/// SmallVector. Note that the returned StringRef may not point to the
/// supplied buffer if a copy can be avoided.
llvm::StringRef Preprocessor::getSpelling(const Token &Tok,
                                          llvm::SmallVectorImpl<char> &Buffer,
                                          bool *Invalid) const {
  // Try the fast path.
  if (const IdentifierInfo *II = Tok.getIdentifierInfo())
    return II->getName();

  // Resize the buffer if we need to copy into it.
  if (Tok.needsCleaning())
    Buffer.resize(Tok.getLength());

  const char *Ptr = Buffer.data();
  unsigned Len = getSpelling(Tok, Ptr, Invalid);
  return llvm::StringRef(Ptr, Len);
}
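// Hedged caller sketch (dumpSpelling is hypothetical). The important contract
// is to use the returned StringRef rather than the scratch buffer: on the
// identifier fast path the result aliases the IdentifierInfo's storage and
// the buffer is never written.
void dumpSpelling(Preprocessor &PP, const Token &Tok) {
  llvm::SmallString<32> scratch;
  bool invalid = false;
  llvm::StringRef spelling = PP.getSpelling(Tok, scratch, &invalid);
  if (!invalid)
    llvm::errs() << spelling << "\n";
}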
void FileManager::GetUniqueIDMapping(
    llvm::SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
  UIDToFiles.clear();
  UIDToFiles.resize(NextFileUID);

  // Map file entries
  for (llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator>::const_iterator
           FE = FileEntries.begin(), FEEnd = FileEntries.end();
       FE != FEEnd; ++FE)
    if (FE->getValue() && FE->getValue() != NON_EXISTENT_FILE)
      UIDToFiles[FE->getValue()->getUID()] = FE->getValue();

  // Map virtual file entries
  for (llvm::SmallVector<FileEntry*, 4>::const_iterator
           VFE = VirtualFileEntries.begin(), VFEEnd = VirtualFileEntries.end();
       VFE != VFEEnd; ++VFE)
    if (*VFE && *VFE != NON_EXISTENT_FILE)
      UIDToFiles[(*VFE)->getUID()] = *VFE;
}
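// Hedged caller sketch (dumpFileUIDs is hypothetical): UIDs never assigned to
// a surviving FileEntry keep the null value left by resize(), so each slot
// must be checked before use.
void dumpFileUIDs(const FileManager &FM) {
  llvm::SmallVector<const FileEntry *, 32> files;
  FM.GetUniqueIDMapping(files);
  for (unsigned uid = 0, e = files.size(); uid != e; ++uid)
    if (files[uid])
      llvm::errs() << uid << " -> " << files[uid]->getName() << "\n";
}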
void IrAggr::addFieldInitializers(
    llvm::SmallVectorImpl<llvm::Constant *> &constants,
    const VarInitMap &explicitInitializers, AggregateDeclaration *decl,
    unsigned &offset, bool populateInterfacesWithVtbls) {

  if (ClassDeclaration *cd = decl->isClassDeclaration()) {
    if (cd->baseClass) {
      addFieldInitializers(constants, explicitInitializers, cd->baseClass,
                           offset, populateInterfacesWithVtbls);
    }

    // has interface vtbls?
    if (cd->vtblInterfaces && cd->vtblInterfaces->dim > 0) {
      // Align interface infos to pointer size.
      unsigned aligned =
          (offset + Target::ptrsize - 1) & ~(Target::ptrsize - 1);
      if (offset < aligned) {
        add_zeros(constants, offset, aligned);
        offset = aligned;
      }

      // false when it's not okay to use functions from super classes
      bool newinsts = (cd == aggrdecl->isClassDeclaration());

      size_t inter_idx = interfacesWithVtbls.size();
      for (auto bc : *cd->vtblInterfaces) {
        constants.push_back(getInterfaceVtbl(bc, newinsts, inter_idx));
        offset += Target::ptrsize;
        inter_idx++;

        if (populateInterfacesWithVtbls)
          interfacesWithVtbls.push_back(bc);
      }
    }
  }

  AggrTypeBuilder b(false, offset);
  b.addAggregate(decl, &explicitInitializers, AggrTypeBuilder::Aliases::Skip);
  offset = b.currentOffset();

  const size_t baseLLFieldIndex = constants.size();
  const size_t numNewLLFields = b.defaultTypes().size();
  constants.resize(constants.size() + numNewLLFields, nullptr);

  // add explicit and non-overlapping implicit initializers
  for (const auto &pair : b.varGEPIndices()) {
    const auto field = pair.first;
    const size_t fieldIndex = pair.second;

    const auto explicitIt = explicitInitializers.find(field);
    llvm::Constant *init = (explicitIt != explicitInitializers.end()
                                ? explicitIt->second
                                : getDefaultInitializer(field));

    constants[baseLLFieldIndex + fieldIndex] =
        FillSArrayDims(field->type, init);
  }

  // zero out remaining padding fields
  for (size_t i = 0; i < numNewLLFields; i++) {
    auto &init = constants[baseLLFieldIndex + i];
    if (!init)
      init = llvm::Constant::getNullValue(b.defaultTypes()[i]);
  }
}
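// The vtbl block above rounds `offset` up to Target::ptrsize with the classic
// power-of-two align-up trick. A standalone, hedged sketch (alignUp is a
// hypothetical helper; `align` must be a power of two):
#include <cstddef>

static size_t alignUp(size_t offset, size_t align) {
  return (offset + align - 1) & ~(align - 1);
}

// e.g. with 8-byte pointers: alignUp(13, 8) == 16 and alignUp(16, 8) == 16,
// matching the add_zeros padding inserted before the interface vtbl pointers.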