/// Read a string record into \p RetStr, validating size and presence.
///
/// Reports an invalid file (using \p errorContext in the message) and
/// returns Failure when the blob is oversized or the entry is corrupted;
/// otherwise interns the blob via TopDiags.makeString and returns Success.
/// When \p allowEmptyString is set, a present record with an empty blob
/// yields an empty string rather than an error.
LoadResult DiagLoader::readString(CXLoadedDiagnosticSetImpl &TopDiags,
                                  llvm::StringRef &RetStr,
                                  llvm::StringRef errorContext,
                                  RecordData &Record,
                                  StringRef Blob,
                                  bool allowEmptyString) {
  // Basic buffer overflow check: reject absurdly large strings up front.
  if (Blob.size() > 65536) {
    reportInvalidFile(std::string("Out-of-bounds string in ") +
                      std::string(errorContext));
    return Failure;
  }

  const bool haveRecord = Record.size() >= 1;
  const bool blobIsEmpty = Blob.empty();

  // An empty string is acceptable only when the caller opted in and the
  // record entry itself is present.
  if (allowEmptyString && haveRecord && blobIsEmpty) {
    RetStr = "";
    return Success;
  }

  if (!haveRecord || blobIsEmpty) {
    reportInvalidFile(std::string("Corrupted ") + std::string(errorContext) +
                      std::string(" entry"));
    return Failure;
  }

  RetStr = TopDiags.makeString(Blob);
  return Success;
}
/// Apply a vector of in-place damages to the record identified by `id`.
///
/// Forwards the damages to the underlying KV engine via an update message,
/// then patches the caller-supplied `oldRec` buffer in place so the update
/// system observes the new image (it assumes mmapv1 semantics).
/// Returns the (patched) old record on success, or the engine's error status.
StatusWith<RecordData> KVRecordStore::updateWithDamages( OperationContext* txn,
                                                         const RecordId& id,
                                                         const RecordData& oldRec,
                                                         const char* damageSource,
                                                         const mutablebson::DamageVector& damages ) {
    const KeyString key(id);
    const Slice oldValue(oldRec.data(), oldRec.size());
    const KVUpdateWithDamagesMessage message(damageSource, damages);

    // updateWithDamages can't change the number or size of records, so we don't need to update
    // stats.
    const Status s = _db->update(txn, Slice::of(key), oldValue, message);
    if (!s.isOK()) {
        return StatusWith<RecordData>(s);
    }

    // We also need to reach in and screw with the old doc's data so that the update system gets
    // the new image, because the update system is assuming mmapv1's behavior. Sigh.
    for (mutablebson::DamageVector::const_iterator it = damages.begin();
         it != damages.end(); ++it) {
        const mutablebson::DamageEvent &event = *it;
        // Fix: a damage may legally write up to and including the final byte of
        // the record (targetOffset + size == record size), so the bound check
        // must be inclusive (<=); the previous strict (<) comparison rejected
        // valid end-of-record writes.
        invariant(event.targetOffset + event.size <= static_cast<uint32_t>(oldRec.size()));
        std::copy(damageSource + event.sourceOffset,
                  damageSource + event.sourceOffset + event.size,
                  /* eek */
                  const_cast<char *>(oldRec.data()) + event.targetOffset);
    }

    return StatusWith<RecordData>(oldRec);
}
// Insert a record and try to perform an in-place update on it. TEST( RecordStoreTestHarness, UpdateWithDamages ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "00010111"; DiskLoc loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv( 3 ); dv[0].sourceOffset = 5; dv[0].targetOffset = 0; dv[0].size = 2; dv[1].sourceOffset = 3; dv[1].targetOffset = 2; dv[1].size = 3; dv[2].sourceOffset = 0; dv[2].targetOffset = 5; dv[2].size = 3; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) ); uow.commit(); } } data = "11101000"; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }
/// Deserialize one source location from \p Record starting at \p offset.
///
/// A serialized location occupies four record words: fileID, line, column,
/// offset.  A fileID of 0 is a sentinel meaning "no location".
/// Advances \p offset past the words consumed and fills \p Loc.
LoadResult DiagLoader::readLocation(CXLoadedDiagnosticSetImpl &TopDiags,
                                    RecordData &Record, unsigned &offset,
                                    CXLoadedDiagnostic::Location &Loc) {
  // Fix: four words are read below (fileID + line + column + offset), so the
  // record must contain at least offset + 4 entries.  The previous check of
  // offset + 3 permitted an out-of-bounds read of Record[offset+3] for
  // non-sentinel locations.  (Assumes the writer always emits all four words,
  // including for the sentinel -- TODO confirm against the emitter.)
  if (Record.size() < offset + 4) {
    reportInvalidFile("Corrupted source location");
    return Failure;
  }

  unsigned fileID = Record[offset++];
  if (fileID == 0) {
    // Sentinel value.
    Loc.file = 0;
    Loc.line = 0;
    Loc.column = 0;
    Loc.offset = 0;
    return Success;
  }

  const FileEntry *FE = TopDiags.Files[fileID];
  if (!FE) {
    reportInvalidFile("Corrupted file entry in source location");
    return Failure;
  }
  Loc.file = const_cast<FileEntry *>(FE);
  Loc.line = Record[offset++];
  Loc.column = Record[offset++];
  Loc.offset = Record[offset++];
  return Success;
}
// Verify that a single three-byte damage applied in place turns
// "aaa111bbb" into "aaa222bbb".
TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    // Engines without in-place update support are exempt from this test.
    if (!rs->updateWithDamagesSupported())
        return;

    string before = "aaa111bbb";
    string after = "aaa222bbb";

    RecordId loc;
    const RecordData beforeRec( before.c_str(), before.size() + 1 );

    // Store the initial record.
    {
        scoped_ptr<OperationContext> ctx( harnessHelper->newOperationContext() );
        WriteUnitOfWork wuow( ctx.get() );
        StatusWith<RecordId> inserted = rs->insertRecord( ctx.get(),
                                                          beforeRec.data(),
                                                          beforeRec.size(),
                                                          -1 );
        ASSERT_OK( inserted.getStatus() );
        loc = inserted.getValue();
        wuow.commit();
    }

    // Sanity-check the stored bytes before damaging them.
    {
        scoped_ptr<OperationContext> ctx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( before, rs->dataFor( ctx.get(), loc ).data() );
    }

    // Overwrite the three bytes at offset 3 with "222".
    {
        scoped_ptr<OperationContext> ctx( harnessHelper->newOperationContext() );
        WriteUnitOfWork wuow( ctx.get() );

        const char* damageSource = "222";
        mutablebson::DamageEvent event;
        event.sourceOffset = 0;
        event.targetOffset = 3;
        event.size = 3;

        mutablebson::DamageVector dv;
        dv.push_back( event );

        Status updated = rs->updateWithDamages( ctx.get(), loc, beforeRec,
                                                damageSource, dv );
        ASSERT_OK( updated );
        wuow.commit();
    }

    // The record must now read back with the damaged contents.
    {
        scoped_ptr<OperationContext> ctx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( after, rs->dataFor( ctx.get(), loc ).data() );
    }
}
// Delete a record through one operation context and check that a second,
// older context still observes the pre-deletion data (snapshot isolation).
TEST( RocksRecordStoreTest, Snapshots1 ) {
    unittest::TempDir td( _rocksRecordStoreTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

    DiskLoc loc;
    int size = -1;

    {
        RocksRecordStore rs( "foo.bar", db.get(),
                             db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );

        string s = "test string";
        size = s.length() + 1;

        MyOperationContext opCtx( db.get() );
        {
            WriteUnitOfWork uow( opCtx.recoveryUnit() );
            StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
        }
    }

    {
        MyOperationContext opCtx( db.get() );
        MyOperationContext opCtx2( db.get() );

        RocksRecordStore rs( "foo.bar", db.get(),
                             db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );

        rs.deleteRecord( &opCtx, loc );

        // The deleting context must no longer see the record.
        RecordData recData = rs.dataFor( loc/*, &opCtx */ );
        ASSERT( !recData.data() && recData.size() == 0 );

        // XXX this test doesn't yet work, but there should be some notion of snapshots,
        // and the op context that doesn't see the deletion shouldn't know that this data
        // has been deleted
        RecordData recData2 = rs.dataFor( loc/*, &opCtx2 */ );
        // Fix: assert on recData2 (the snapshot that should NOT see the
        // deletion).  The original asserted on recData, which was just
        // verified to be empty above and so could never satisfy this check.
        ASSERT( recData2.data() && recData2.size() == size );
    }
}
// Insert a record and try to call updateWithDamages() with an empty DamageVector. TEST( RecordStoreTestHarness, UpdateWithNoDamages ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); if (!rs->updateWithDamagesSupported()) return; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "my record"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }
/// Read the metadata block of a serialized-diagnostics file, validating the
/// VERSION record.  Returns Failure (after reporting) on malformed input or
/// unsupported versions, Success once the block ends with a checked version.
LoadResult DiagLoader::readMetaBlock(llvm::BitstreamCursor &Stream) {
  if (Stream.EnterSubBlock(lfort::serialized_diags::BLOCK_META)) {
    reportInvalidFile("Malformed metadata block");
    return Failure;
  }

  bool versionChecked = false;

  while (true) {
    unsigned blockOrCode = 0;
    StreamResult Res = readToNextRecordOrBlock(Stream, "Metadata Block",
                                               blockOrCode);

    switch(Res) {
      case Read_EndOfStream:
        llvm_unreachable("EndOfStream handled by readToNextRecordOrBlock");
      case Read_Failure:
        return Failure;
      case Read_Record:
        break;
      case Read_BlockBegin:
        if (Stream.SkipBlock()) {
          reportInvalidFile("Malformed metadata block");
          return Failure;
        }
        // Fix: keep scanning for the VERSION record after skipping a nested
        // block.  Previously control fell through into Read_BlockEnd and
        // terminated the metadata block prematurely (returning Failure for
        // "missing version" or Success without reaching the real block end).
        continue;
      case Read_BlockEnd:
        if (!versionChecked) {
          reportInvalidFile("Diagnostics file does not contain version"
                            " information");
          return Failure;
        }
        return Success;
    }

    // We are positioned on a record: decode it and check for VERSION.
    RecordData Record;
    const char *Blob;
    unsigned BlobLen;
    unsigned recordID = Stream.ReadRecord(blockOrCode, Record, &Blob, &BlobLen);

    if (recordID == serialized_diags::RECORD_VERSION) {
      if (Record.size() < 1) {
        reportInvalidFile("malformed VERSION identifier in diagnostics file");
        return Failure;
      }
      if (Record[0] > MaxSupportedVersion) {
        reportInvalidFile("diagnosics file is a newer version than the one "
                          "supported");
        return Failure;
      }
      versionChecked = true;
    }
  }
}
// Insert multiple records and verify their contents by calling dataFor() // on each of the returned RecordIds. TEST(RecordStoreTestHarness, DataForMultiple) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } const int nToInsert = 10; RecordId locs[nToInsert]; for (int i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record----" << i; string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); locs[i] = res.getValue(); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get())); } for (int i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record----" << i; string data = ss.str(); RecordData record = rs->dataFor(opCtx.get(), locs[i]); ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size())); ASSERT_EQUALS(data, record.data()); } } }
// Insert a record and verify its contents by calling dataFor() // on the returned RecordId. TEST(RecordStoreTestHarness, DataFor) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } string data = "record-"; RecordId loc; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); loc = res.getValue(); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size())); ASSERT_EQUALS(data, record.data()); } } }
/// Read one diagnostic block (and, recursively, its subdiagnostic blocks),
/// appending the resulting diagnostic to \p Diags.
LoadResult DiagLoader::readDiagnosticBlock(llvm::BitstreamCursor &Stream,
                                           CXDiagnosticSetImpl &Diags,
                                           CXLoadedDiagnosticSetImpl &TopDiags){
  if (Stream.EnterSubBlock(clang::serialized_diags::BLOCK_DIAG)) {
    reportInvalidFile("malformed diagnostic block");
    return Failure;
  }

  OwningPtr<CXLoadedDiagnostic> D(new CXLoadedDiagnostic());
  RecordData Record;

  while (true) {
    unsigned blockOrCode = 0;
    StreamResult Res = readToNextRecordOrBlock(Stream, "Diagnostic Block",
                                               blockOrCode);
    switch (Res) {
      case Read_EndOfStream:
        llvm_unreachable("EndOfStream handled in readToNextRecordOrBlock");
      case Read_Failure:
        return Failure;
      case Read_BlockBegin: {
        // The only blocks we care about are subdiagnostics.
        if (blockOrCode != serialized_diags::BLOCK_DIAG) {
          // Fix: BitstreamCursor::SkipBlock() returns true on error.  The
          // previous condition was inverted (!SkipBlock()), which rejected
          // well-formed files whenever an unknown subblock was skipped
          // successfully and silently accepted malformed ones.  This now
          // matches the SkipBlock handling in readMetaBlock.
          if (Stream.SkipBlock()) {
            reportInvalidFile("Invalid subblock in Diagnostics block");
            return Failure;
          }
        } else if (readDiagnosticBlock(Stream, D->getChildDiagnostics(),
                                       TopDiags)) {
          return Failure;
        }
        continue;
      }
      case Read_BlockEnd:
        Diags.appendDiagnostic(D.take());
        return Success;
      case Read_Record:
        break;
    }

    // Read the record.
    Record.clear();
    StringRef Blob;
    unsigned recID = Stream.readRecord(blockOrCode, Record, &Blob);

    if (recID < serialized_diags::RECORD_FIRST ||
        recID > serialized_diags::RECORD_LAST)
      continue;

    switch ((serialized_diags::RecordIDs)recID) {
      case serialized_diags::RECORD_VERSION:
        // Version is validated in the metadata block; ignore here.
        continue;
      case serialized_diags::RECORD_CATEGORY:
        if (readString(TopDiags, TopDiags.Categories, "category", Record,
                       Blob,
                       /* allowEmptyString */ true))
          return Failure;
        continue;
      case serialized_diags::RECORD_DIAG_FLAG:
        if (readString(TopDiags, TopDiags.WarningFlags, "warning flag",
                       Record, Blob))
          return Failure;
        continue;
      case serialized_diags::RECORD_FILENAME: {
        if (readString(TopDiags, TopDiags.FileNames, "filename", Record,
                       Blob))
          return Failure;

        if (Record.size() < 3) {
          reportInvalidFile("Invalid file entry");
          return Failure;
        }

        // Register a virtual file so later source locations can resolve it.
        const FileEntry *FE =
          TopDiags.FakeFiles.getVirtualFile(TopDiags.FileNames[Record[0]],
                                            /* size */ Record[1],
                                            /* time */ Record[2]);
        TopDiags.Files[Record[0]] = FE;
        continue;
      }
      case serialized_diags::RECORD_SOURCE_RANGE: {
        CXSourceRange SR;
        if (readRange(TopDiags, Record, 0, SR))
          return Failure;
        D->Ranges.push_back(SR);
        continue;
      }
      case serialized_diags::RECORD_FIXIT: {
        CXSourceRange SR;
        if (readRange(TopDiags, Record, 0, SR))
          return Failure;
        llvm::StringRef RetStr;
        if (readString(TopDiags, RetStr, "FIXIT", Record, Blob,
                       /* allowEmptyString */ true))
          return Failure;
        D->FixIts.push_back(std::make_pair(SR, createCXString(RetStr, false)));
        continue;
      }
      case serialized_diags::RECORD_DIAG: {
        // NOTE(review): Record[0] and the indexed reads below assume the
        // record is well-formed; unlike the other cases there is no explicit
        // size check here -- confirm against the emitter before tightening.
        D->severity = Record[0];
        unsigned offset = 1;
        if (readLocation(TopDiags, Record, offset, D->DiagLoc))
          return Failure;
        D->category = Record[offset++];
        unsigned diagFlag = Record[offset++];
        D->DiagOption = diagFlag ? TopDiags.WarningFlags[diagFlag] : "";
        D->CategoryText = D->category ? TopDiags.Categories[D->category] : "";
        D->Spelling = TopDiags.makeString(Blob);
        continue;
      }
    }
  }
}
/// \brief Read the declaration at the given offset from the PCH file.
///
/// Jumps the declaration cursor to \p Offset, decodes the record code, and
/// allocates a placeholder Decl of the matching kind; the placeholder's
/// fields are then filled in from the record by Reader.Visit(D).  The new
/// declaration is registered at \p Index via LoadedDecl before visitation so
/// that self-referential records resolve.  Returns the materialized Decl.
Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
  // Keep track of where we are in the stream, then jump back there
  // after reading this declaration.
  SavedStreamPosition SavedPosition(DeclsCursor);

  // Note that we are loading a declaration record.
  LoadingTypeOrDecl Loading(*this);

  DeclsCursor.JumpToBit(Offset);
  RecordData Record;
  unsigned Code = DeclsCursor.ReadCode();
  unsigned Idx = 0;
  PCHDeclReader Reader(*this, Record, Idx);

  Decl *D = 0;
  // Each case constructs an empty "shell" declaration with null/default
  // arguments; Reader.Visit(D) below deserializes the real contents.
  switch ((pch::DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
  case pch::DECL_ATTR:
  case pch::DECL_CONTEXT_LEXICAL:
  case pch::DECL_CONTEXT_VISIBLE:
    // These record kinds are handled by dedicated readers, not this one.
    assert(false && "Record cannot be de-serialized with ReadDeclRecord");
    break;
  case pch::DECL_TRANSLATION_UNIT:
    assert(Index == 0 && "Translation unit must be at index 0");
    // The translation unit is owned by the ASTContext, not re-created here.
    D = Context->getTranslationUnitDecl();
    break;
  case pch::DECL_TYPEDEF:
    D = TypedefDecl::Create(*Context, 0, SourceLocation(), 0, 0);
    break;
  case pch::DECL_ENUM:
    D = EnumDecl::Create(*Context, 0, SourceLocation(), 0, SourceLocation(), 0);
    break;
  case pch::DECL_RECORD:
    D = RecordDecl::Create(*Context, TagDecl::TK_struct, 0, SourceLocation(),
                           0, SourceLocation(), 0);
    break;
  case pch::DECL_ENUM_CONSTANT:
    D = EnumConstantDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
                                 0, llvm::APSInt());
    break;
  case pch::DECL_FUNCTION:
    D = FunctionDecl::Create(*Context, 0, SourceLocation(), DeclarationName(),
                             QualType(), 0);
    break;
  case pch::DECL_OBJC_METHOD:
    D = ObjCMethodDecl::Create(*Context, SourceLocation(), SourceLocation(),
                               Selector(), QualType(), 0, 0);
    break;
  case pch::DECL_OBJC_INTERFACE:
    D = ObjCInterfaceDecl::Create(*Context, 0, SourceLocation(), 0);
    break;
  case pch::DECL_OBJC_IVAR:
    D = ObjCIvarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0,
                             ObjCIvarDecl::None);
    break;
  case pch::DECL_OBJC_PROTOCOL:
    D = ObjCProtocolDecl::Create(*Context, 0, SourceLocation(), 0);
    break;
  case pch::DECL_OBJC_AT_DEFS_FIELD:
    D = ObjCAtDefsFieldDecl::Create(*Context, 0, SourceLocation(), 0,
                                    QualType(), 0);
    break;
  case pch::DECL_OBJC_CLASS:
    D = ObjCClassDecl::Create(*Context, 0, SourceLocation());
    break;
  case pch::DECL_OBJC_FORWARD_PROTOCOL:
    D = ObjCForwardProtocolDecl::Create(*Context, 0, SourceLocation());
    break;
  case pch::DECL_OBJC_CATEGORY:
    D = ObjCCategoryDecl::Create(*Context, 0, SourceLocation(),
                                 SourceLocation(), SourceLocation(), 0);
    break;
  case pch::DECL_OBJC_CATEGORY_IMPL:
    D = ObjCCategoryImplDecl::Create(*Context, 0, SourceLocation(), 0, 0);
    break;
  case pch::DECL_OBJC_IMPLEMENTATION:
    D = ObjCImplementationDecl::Create(*Context, 0, SourceLocation(), 0, 0);
    break;
  case pch::DECL_OBJC_COMPATIBLE_ALIAS:
    D = ObjCCompatibleAliasDecl::Create(*Context, 0, SourceLocation(), 0, 0);
    break;
  case pch::DECL_OBJC_PROPERTY:
    D = ObjCPropertyDecl::Create(*Context, 0, SourceLocation(), 0,
                                 SourceLocation(), QualType());
    break;
  case pch::DECL_OBJC_PROPERTY_IMPL:
    D = ObjCPropertyImplDecl::Create(*Context, 0, SourceLocation(),
                                     SourceLocation(), 0,
                                     ObjCPropertyImplDecl::Dynamic, 0);
    break;
  case pch::DECL_FIELD:
    D = FieldDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0, 0,
                          false);
    break;
  case pch::DECL_VAR:
    D = VarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0,
                        VarDecl::None);
    break;
  case pch::DECL_IMPLICIT_PARAM:
    D = ImplicitParamDecl::Create(*Context, 0, SourceLocation(), 0,
                                  QualType());
    break;
  case pch::DECL_PARM_VAR:
    D = ParmVarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0,
                            VarDecl::None, 0);
    break;
  case pch::DECL_FILE_SCOPE_ASM:
    D = FileScopeAsmDecl::Create(*Context, 0, SourceLocation(), 0);
    break;
  case pch::DECL_BLOCK:
    D = BlockDecl::Create(*Context, 0, SourceLocation());
    break;
  case pch::DECL_NAMESPACE:
    D = NamespaceDecl::Create(*Context, 0, SourceLocation(), 0);
    break;
  }
  assert(D && "Unknown declaration reading PCH file");
  // Register the declaration before visiting so cyclic references resolve.
  LoadedDecl(Index, D);
  Reader.Visit(D);

  // If this declaration is also a declaration context, get the
  // offsets for its tables of lexical and visible declarations.
  if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
    std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
    if (Offsets.first || Offsets.second) {
      DC->setHasExternalLexicalStorage(Offsets.first != 0);
      DC->setHasExternalVisibleStorage(Offsets.second != 0);
      DeclContextOffsets[DC] = Offsets;
    }
  }
  // Every word of the record should have been consumed by the visitor.
  assert(Idx == Record.size());

  // If we have deserialized a declaration that has a definition the
  // AST consumer might need to know about, notify the consumer
  // about that definition now or queue it for later.
  if (isConsumerInterestedIn(D)) {
    if (Consumer) {
      DeclGroupRef DG(D);
      Consumer->HandleTopLevelDecl(DG);
    } else {
      InterestingDecls.push_back(D);
    }
  }
  return D;
}
/// \brief Reads attributes from the current stream position.
///
/// Decodes one DECL_ATTR record into a linked list of Attr objects.  Each
/// attribute is encoded as: kind word, inherited flag, then kind-specific
/// payload words consumed via Idx.  Attributes are prepended while decoding
/// and the list is reversed before returning, so the returned order matches
/// the serialized order.
Attr *PCHReader::ReadAttributes() {
  unsigned Code = DeclsCursor.ReadCode();
  assert(Code == llvm::bitc::UNABBREV_RECORD &&
         "Expected unabbreviated record");
  (void)Code;

  RecordData Record;
  unsigned Idx = 0;
  unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
  assert(RecCode == pch::DECL_ATTR && "Expected attribute record");
  (void)RecCode;

// Helper case-generators keyed by payload shape:
//   SIMPLE_ATTR   - no payload words
//   STRING_ATTR   - one serialized string payload (via ReadString)
//   UNSIGNED_ATTR - one unsigned payload word
#define SIMPLE_ATTR(Name)                       \
 case Attr::Name:                               \
   New = ::new (*Context) Name##Attr();         \
   break

#define STRING_ATTR(Name)                                               \
 case Attr::Name:                                                       \
   New = ::new (*Context) Name##Attr(*Context, ReadString(Record, Idx)); \
   break

#define UNSIGNED_ATTR(Name)                             \
 case Attr::Name:                                       \
   New = ::new (*Context) Name##Attr(Record[Idx++]);    \
   break

  Attr *Attrs = 0;
  while (Idx < Record.size()) {
    Attr *New = 0;
    Attr::Kind Kind = (Attr::Kind)Record[Idx++];
    bool IsInherited = Record[Idx++];

    switch (Kind) {
    default:
      assert(0 && "Unknown attribute!");
      break;
    STRING_ATTR(Alias);
    UNSIGNED_ATTR(Aligned);
    SIMPLE_ATTR(AlwaysInline);
    SIMPLE_ATTR(AnalyzerNoReturn);
    STRING_ATTR(Annotate);
    STRING_ATTR(AsmLabel);
    SIMPLE_ATTR(BaseCheck);

    case Attr::Blocks:
      New = ::new (*Context) BlocksAttr(
          (BlocksAttr::BlocksAttrTypes)Record[Idx++]);
      break;

    SIMPLE_ATTR(CDecl);

    case Attr::Cleanup:
      // Payload is a declaration ID resolved through GetDecl.
      New = ::new (*Context) CleanupAttr(
          cast<FunctionDecl>(GetDecl(Record[Idx++])));
      break;

    SIMPLE_ATTR(Const);
    UNSIGNED_ATTR(Constructor);
    SIMPLE_ATTR(DLLExport);
    SIMPLE_ATTR(DLLImport);
    SIMPLE_ATTR(Deprecated);
    UNSIGNED_ATTR(Destructor);
    SIMPLE_ATTR(FastCall);
    SIMPLE_ATTR(Final);

    case Attr::Format: {
      // Payload: archetype string, format-string index, first-arg index.
      std::string Type = ReadString(Record, Idx);
      unsigned FormatIdx = Record[Idx++];
      unsigned FirstArg = Record[Idx++];
      New = ::new (*Context) FormatAttr(*Context, Type, FormatIdx, FirstArg);
      break;
    }

    case Attr::FormatArg: {
      unsigned FormatIdx = Record[Idx++];
      New = ::new (*Context) FormatArgAttr(FormatIdx);
      break;
    }

    case Attr::Sentinel: {
      int sentinel = Record[Idx++];
      int nullPos = Record[Idx++];
      New = ::new (*Context) SentinelAttr(sentinel, nullPos);
      break;
    }

    SIMPLE_ATTR(GNUInline);
    SIMPLE_ATTR(Hiding);

    case Attr::IBActionKind:
      New = ::new (*Context) IBActionAttr();
      break;

    case Attr::IBOutletKind:
      New = ::new (*Context) IBOutletAttr();
      break;

    SIMPLE_ATTR(Malloc);
    SIMPLE_ATTR(NoDebug);
    SIMPLE_ATTR(NoInline);
    SIMPLE_ATTR(NoReturn);
    SIMPLE_ATTR(NoThrow);

    case Attr::NonNull: {
      // Payload: count word followed by that many argument indices.
      unsigned Size = Record[Idx++];
      llvm::SmallVector<unsigned, 16> ArgNums;
      ArgNums.insert(ArgNums.end(), &Record[Idx], &Record[Idx] + Size);
      Idx += Size;
      New = ::new (*Context) NonNullAttr(*Context, ArgNums.data(), Size);
      break;
    }

    case Attr::ReqdWorkGroupSize: {
      unsigned X = Record[Idx++];
      unsigned Y = Record[Idx++];
      unsigned Z = Record[Idx++];
      New = ::new (*Context) ReqdWorkGroupSizeAttr(X, Y, Z);
      break;
    }

    SIMPLE_ATTR(ObjCException);
    SIMPLE_ATTR(ObjCNSObject);
    SIMPLE_ATTR(CFReturnsNotRetained);
    SIMPLE_ATTR(CFReturnsRetained);
    SIMPLE_ATTR(NSReturnsNotRetained);
    SIMPLE_ATTR(NSReturnsRetained);
    SIMPLE_ATTR(Overloadable);
    SIMPLE_ATTR(Override);
    SIMPLE_ATTR(Packed);
    UNSIGNED_ATTR(PragmaPack);
    SIMPLE_ATTR(Pure);
    UNSIGNED_ATTR(Regparm);
    STRING_ATTR(Section);
    SIMPLE_ATTR(StdCall);
    SIMPLE_ATTR(TransparentUnion);
    SIMPLE_ATTR(Unavailable);
    SIMPLE_ATTR(Unused);
    SIMPLE_ATTR(Used);

    case Attr::Visibility:
      New = ::new (*Context) VisibilityAttr(
          (VisibilityAttr::VisibilityTypes)Record[Idx++]);
      break;

    SIMPLE_ATTR(WarnUnusedResult);
    SIMPLE_ATTR(Weak);
    SIMPLE_ATTR(WeakRef);
    SIMPLE_ATTR(WeakImport);
    }

    assert(New && "Unable to decode attribute?");
    New->setInherited(IsInherited);
    // Prepend: the list is built in reverse and flipped below.
    New->setNext(Attrs);
    Attrs = New;
  }
#undef UNSIGNED_ATTR
#undef STRING_ATTR
#undef SIMPLE_ATTR

  // The list of attributes was built backwards. Reverse the list
  // before returning it.
  Attr *PrevAttr = 0, *NextAttr = 0;
  while (Attrs) {
    NextAttr = Attrs->getNext();
    Attrs->setNext(PrevAttr);
    PrevAttr = Attrs;
    Attrs = NextAttr;
  }

  return PrevAttr;
}