bool KArchive::writeDir_impl(const QString &name, const QString &user,
                             const QString &group, mode_t /*perm*/,
                             time_t /*atime*/, time_t /*mtime*/,
                             time_t /*ctime*/)
{
    kdWarning(7040) << "New writeDir API not implemented in this class." << endl
                    << "Falling back to old API (metadata information will be lost)" << endl;
    return writeDir(name, user, group);
}
void HousekeepStoreAccount::UpdateDirectorySize(
    BackupStoreDirectory& rDirectory,
    IOStream::pos_type new_size_in_blocks)
{
#ifndef NDEBUG
    // In debug builds, check that the size we were given really is the
    // directory's current disc usage
    {
        std::string dirFilename;
        MakeObjectFilename(rDirectory.GetObjectID(), dirFilename);
        std::auto_ptr<RaidFileRead> dirStream(
            RaidFileRead::Open(mStoreDiscSet, dirFilename));
        ASSERT(new_size_in_blocks == dirStream->GetDiscUsageInBlocks());
    }
#endif

    IOStream::pos_type old_size_in_blocks =
        rDirectory.GetUserInfo1_SizeInBlocks();

    // Nothing to do if the size hasn't changed
    if(new_size_in_blocks == old_size_in_blocks)
    {
        return;
    }

    // Update the size cached on the directory object itself
    rDirectory.SetUserInfo1_SizeInBlocks(new_size_in_blocks);

    // The root directory has no parent entry to update
    if(rDirectory.GetObjectID() == BACKUPSTORE_ROOT_DIRECTORY_ID)
    {
        return;
    }

    // Load the parent directory, closing the read stream as soon as the
    // directory has been read
    std::string parentFilename;
    MakeObjectFilename(rDirectory.GetContainerID(), parentFilename);
    std::auto_ptr<RaidFileRead> parentStream(
        RaidFileRead::Open(mStoreDiscSet, parentFilename));
    BackupStoreDirectory parent(*parentStream);
    parentStream.reset();

    // Find this directory's entry in its parent, and fix up the size
    // recorded there
    BackupStoreDirectory::Entry* en =
        parent.FindEntryByID(rDirectory.GetObjectID());
    ASSERT(en);

    if(en->GetSizeInBlocks() != old_size_in_blocks)
    {
        BOX_WARNING("Directory " <<
            BOX_FORMAT_OBJECTID(rDirectory.GetObjectID()) <<
            " entry in directory " <<
            BOX_FORMAT_OBJECTID(rDirectory.GetContainerID()) <<
            " had incorrect size " << en->GetSizeInBlocks() <<
            ", should have been " << old_size_in_blocks);
        mErrorCount++;
    }

    en->SetSizeInBlocks(new_size_in_blocks);

    // Write the revised parent directory back to disc
    RaidFileWrite writeDir(mStoreDiscSet, parentFilename,
        mapNewRefs->GetRefCount(rDirectory.GetContainerID()));
    writeDir.Open(true /* allow overwriting */);
    parent.WriteToStream(writeDir);
    writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
}
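// Usage note (inferred from the call sites later in this file, not stated
// by the original author): callers rewrite a directory first, measure its
// new on-disc size, and only then call UpdateDirectorySize, e.g.
//
//     RaidFileWrite writeDir(mStoreDiscSet, filename, refCount);
//     writeDir.Open(true /* allow overwriting */);
//     dir.WriteToStream(writeDir);
//     int64_t newSize = writeDir.GetDiscUsageInBlocks();
//     writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
//     UpdateDirectorySize(dir, newSize);
//
// (filename and refCount above are placeholders, not variables from this
// file.)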
BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
    int64_t InDirectory, int64_t ObjectID,
    BackupStoreDirectory &rDirectory,
    const std::string &rDirectoryFilename,
    BackupStoreInfo& rBackupStoreInfo)
{
    // Find the entry inside the directory
    bool wasDeleted = false;
    bool wasOldVersion = false;
    int64_t deletedFileSizeInBlocks = 0;

    // A pointer to an object which requires committing if the directory
    // save goes OK
    std::auto_ptr<RaidFileWrite> padjustedEntry;

    // BLOCK
    {
        BackupStoreRefCountDatabase::refcount_t refs =
            mapNewRefs->GetRefCount(ObjectID);

        BackupStoreDirectory::Entry *pentry =
            rDirectory.FindEntryByID(ObjectID);
        if(pentry == 0)
        {
            BOX_ERROR("Housekeeping on account " <<
                BOX_FORMAT_ACCOUNT(mAccountID) << " "
                "found error: object " <<
                BOX_FORMAT_OBJECTID(ObjectID) << " "
                "not found in dir " <<
                BOX_FORMAT_OBJECTID(InDirectory) << ", "
                "indicates logic error/corruption? Run "
                "bbstoreaccounts check <accid> fix");
            mErrorCount++;
            return refs;
        }

        // Record the flags it's got set
        wasDeleted = pentry->IsDeleted();
        wasOldVersion = pentry->IsOld();

        // Check this should be deleted
        if(!wasDeleted && !wasOldVersion)
        {
            // Things changed since we were last around
            return refs;
        }

        // Record size
        deletedFileSizeInBlocks = pentry->GetSizeInBlocks();

        if(refs > 1)
        {
            // Not safe to merge patches if someone else has a
            // reference to this object, so just remove the
            // directory entry and return.
            rDirectory.DeleteEntry(ObjectID);
            if(wasDeleted)
            {
                rBackupStoreInfo.AdjustNumDeletedFiles(-1);
            }

            if(wasOldVersion)
            {
                rBackupStoreInfo.AdjustNumOldFiles(-1);
            }

            mapNewRefs->RemoveReference(ObjectID);
            return refs - 1;
        }

        // If the entry is involved in a chain of patches, it needs
        // to be handled a bit more carefully.
        if(pentry->GetDependsNewer() != 0 &&
            pentry->GetDependsOlder() == 0)
        {
            // This entry is a patch from a newer entry. Just
            // need to update the info on that entry.
            BackupStoreDirectory::Entry *pnewer =
                rDirectory.FindEntryByID(pentry->GetDependsNewer());
            if(pnewer == 0 ||
                pnewer->GetDependsOlder() != ObjectID)
            {
                THROW_EXCEPTION(BackupStoreException,
                    PatchChainInfoBadInDirectory);
            }
            // Change the info in the newer entry so that this
            // no longer points to this entry
            pnewer->SetDependsOlder(0);
        }
        else if(pentry->GetDependsOlder() != 0)
        {
            BackupStoreDirectory::Entry *polder =
                rDirectory.FindEntryByID(pentry->GetDependsOlder());
            if(pentry->GetDependsNewer() == 0)
            {
                // There exists an older version which depends
                // on this one. Need to combine the two over
                // that one.

                // Adjust the other entry in the directory
                if(polder == 0 ||
                    polder->GetDependsNewer() != ObjectID)
                {
                    THROW_EXCEPTION(BackupStoreException,
                        PatchChainInfoBadInDirectory);
                }
                // Change the info in the older entry so that
                // this no longer points to this entry
                polder->SetDependsNewer(0);
            }
            else
            {
                // This entry is in the middle of a chain,
                // and two patches need combining.

                // First, adjust the directory entries
                BackupStoreDirectory::Entry *pnewer =
                    rDirectory.FindEntryByID(pentry->GetDependsNewer());
                if(pnewer == 0 ||
                    pnewer->GetDependsOlder() != ObjectID ||
                    polder == 0 ||
                    polder->GetDependsNewer() != ObjectID)
                {
                    THROW_EXCEPTION(BackupStoreException,
                        PatchChainInfoBadInDirectory);
                }
                // Remove the middle entry from the linked
                // list by simply using the values from this
                // entry
                pnewer->SetDependsOlder(pentry->GetDependsOlder());
                polder->SetDependsNewer(pentry->GetDependsNewer());
            }

            // COMMON CODE to both cases

            // Generate the filename of the older version
            std::string objFilenameOlder;
            MakeObjectFilename(pentry->GetDependsOlder(),
                objFilenameOlder);
            // Open it twice (it's the diff)
            std::auto_ptr<RaidFileRead> pdiff(
                RaidFileRead::Open(mStoreDiscSet,
                    objFilenameOlder));
            std::auto_ptr<RaidFileRead> pdiff2(
                RaidFileRead::Open(mStoreDiscSet,
                    objFilenameOlder));
            // Open this file
            std::string objFilename;
            MakeObjectFilename(ObjectID, objFilename);
            std::auto_ptr<RaidFileRead> pobjectBeingDeleted(
                RaidFileRead::Open(mStoreDiscSet, objFilename));
            // And open a write file to overwrite the other
            // directory entry
            padjustedEntry.reset(new RaidFileWrite(mStoreDiscSet,
                objFilenameOlder,
                mapNewRefs->GetRefCount(ObjectID)));
            padjustedEntry->Open(true /* allow overwriting */);

            if(pentry->GetDependsNewer() == 0)
            {
                // There exists an older version which depends
                // on this one. Need to combine the two over
                // that one.
                BackupStoreFile::CombineFile(*pdiff, *pdiff2,
                    *pobjectBeingDeleted, *padjustedEntry);
            }
            else
            {
                // This entry is in the middle of a chain,
                // and two patches need combining.
                BackupStoreFile::CombineDiffs(
                    *pobjectBeingDeleted, *pdiff, *pdiff2,
                    *padjustedEntry);
            }
            // The file will be committed later when the
            // directory is safely committed.

            // Work out the adjusted size
            int64_t newSize =
                padjustedEntry->GetDiscUsageInBlocks();
            int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
            mBlocksUsedDelta += sizeDelta;
            if(polder->IsDeleted())
            {
                mBlocksInDeletedFilesDelta += sizeDelta;
            }
            if(polder->IsOld())
            {
                mBlocksInOldFilesDelta += sizeDelta;
            }
            polder->SetSizeInBlocks(newSize);
        }

        // pentry no longer valid
    }

    // Delete it from the directory
    rDirectory.DeleteEntry(ObjectID);

    // Save directory back to disc
    // BLOCK
    {
        RaidFileWrite writeDir(mStoreDiscSet, rDirectoryFilename,
            mapNewRefs->GetRefCount(InDirectory));
        writeDir.Open(true /* allow overwriting */);
        rDirectory.WriteToStream(writeDir);

        // Get the disc usage (must do this before committing it)
        int64_t new_size = writeDir.GetDiscUsageInBlocks();

        // Commit directory
        writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);

        // Adjust block counts if the directory itself changed in size
        int64_t original_size =
            rDirectory.GetUserInfo1_SizeInBlocks();
        int64_t adjust = new_size - original_size;
        mBlocksUsedDelta += adjust;
        mBlocksInDirectoriesDelta += adjust;

        UpdateDirectorySize(rDirectory, new_size);
    }

    // Commit any new adjusted entry
    if(padjustedEntry.get() != 0)
    {
        padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
        padjustedEntry.reset(); // delete it now
    }

    // Drop reference count by one. Must now be zero, to delete the file.
    bool remaining_refs = mapNewRefs->RemoveReference(ObjectID);
    ASSERT(!remaining_refs);

    // Delete from disc
    BOX_TRACE("Removing unreferenced object " <<
        BOX_FORMAT_OBJECTID(ObjectID));
    std::string objFilename;
    MakeObjectFilename(ObjectID, objFilename);
    RaidFileWrite del(mStoreDiscSet, objFilename,
        mapNewRefs->GetRefCount(ObjectID));
    del.Delete();

    // Adjust counts for the file
    ++mFilesDeleted;
    mBlocksUsedDelta -= deletedFileSizeInBlocks;

    if(wasDeleted)
    {
        mBlocksInDeletedFilesDelta -= deletedFileSizeInBlocks;
        rBackupStoreInfo.AdjustNumDeletedFiles(-1);
    }

    if(wasOldVersion)
    {
        mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
        rBackupStoreInfo.AdjustNumOldFiles(-1);
    }

    // Delete the directory?
    // Do this if... dir has zero entries, and is marked as deleted in
    // its containing directory
    if(rDirectory.GetNumberOfEntries() == 0)
    {
        // Candidate for deletion
        mEmptyDirectories.push_back(InDirectory);
    }

    return 0;
}
void HousekeepStoreAccount::DeleteEmptyDirectory(int64_t dirId,
    std::vector<int64_t>& rToExamine, BackupStoreInfo& rBackupStoreInfo)
{
    // Load up the directory to potentially delete
    std::string dirFilename;
    BackupStoreDirectory dir;
    int64_t dirSizeInBlocks = 0;

    // BLOCK
    {
        MakeObjectFilename(dirId, dirFilename);

        // Check it actually exists (just in case it gets
        // added twice to the list)
        if(!RaidFileRead::FileExists(mStoreDiscSet, dirFilename))
        {
            // doesn't exist, next!
            return;
        }

        // load
        std::auto_ptr<RaidFileRead> dirStream(
            RaidFileRead::Open(mStoreDiscSet, dirFilename));
        dirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
        dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
    }

    // Make sure this directory is actually empty
    if(dir.GetNumberOfEntries() != 0)
    {
        // Not actually empty, try next one
        return;
    }

    // Candidate for deletion... open containing directory
    std::string containingDirFilename;
    BackupStoreDirectory containingDir;
    int64_t containingDirSizeInBlocksOrig = 0;
    {
        MakeObjectFilename(dir.GetContainerID(), containingDirFilename);
        std::auto_ptr<RaidFileRead> containingDirStream(
            RaidFileRead::Open(mStoreDiscSet,
                containingDirFilename));
        containingDirSizeInBlocksOrig =
            containingDirStream->GetDiscUsageInBlocks();
        containingDir.ReadFromStream(*containingDirStream,
            IOStream::TimeOutInfinite);
        containingDir.SetUserInfo1_SizeInBlocks(
            containingDirSizeInBlocksOrig);
    }

    // Find the entry
    BackupStoreDirectory::Entry *pdirentry =
        containingDir.FindEntryByID(dir.GetObjectID());
    // TODO FIXME invert test and reduce indentation
    if((pdirentry != 0) && pdirentry->IsDeleted())
    {
        // Should be deleted
        containingDir.DeleteEntry(dir.GetObjectID());

        // Is the containing dir now a candidate for deletion?
        if(containingDir.GetNumberOfEntries() == 0)
        {
            rToExamine.push_back(containingDir.GetObjectID());
        }

        // Write revised parent directory
        RaidFileWrite writeDir(mStoreDiscSet, containingDirFilename,
            mapNewRefs->GetRefCount(containingDir.GetObjectID()));
        writeDir.Open(true /* allow overwriting */);
        containingDir.WriteToStream(writeDir);

        // get the disc usage (must do this before committing it)
        int64_t dirSize = writeDir.GetDiscUsageInBlocks();

        // Commit directory
        writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
        UpdateDirectorySize(containingDir, dirSize);

        // adjust usage counts for this directory
        if(dirSize > 0)
        {
            int64_t adjust = dirSize - containingDirSizeInBlocksOrig;
            mBlocksUsedDelta += adjust;
            mBlocksInDirectoriesDelta += adjust;
        }

        if(mapNewRefs->RemoveReference(dir.GetObjectID()))
        {
            // Still referenced
            BOX_TRACE("Housekeeping spared empty deleted dir " <<
                BOX_FORMAT_OBJECTID(dirId) << " due to " <<
                mapNewRefs->GetRefCount(dir.GetObjectID()) <<
                " remaining references");
            return;
        }

        // Delete the directory itself
        BOX_INFO("Housekeeping removing empty deleted dir " <<
            BOX_FORMAT_OBJECTID(dirId));
        RaidFileWrite del(mStoreDiscSet, dirFilename,
            mapNewRefs->GetRefCount(dir.GetObjectID()));
        del.Delete();

        // And adjust usage counts for the directory that's
        // just been deleted
        mBlocksUsedDelta -= dirSizeInBlocks;
        mBlocksInDirectoriesDelta -= dirSizeInBlocks;

        // Update count
        ++mEmptyDirectoriesDeleted;
        rBackupStoreInfo.AdjustNumDirectories(-1);
    }
}
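// A minimal standalone sketch (hypothetical driver, not a function from this
// file) of how the rToExamine contract above cascades deletions upwards:
// removing an empty directory can leave its parent empty, so newly-empty
// parents are collected and examined on the next pass until no candidates
// remain.
#include <cstdint>
#include <vector>

// deleteOne(dirId, toExamine) deletes dirId if it is empty and marked
// deleted, pushing any newly-empty parent onto toExamine -- the behaviour
// of DeleteEmptyDirectory above
void DrainEmptyDirectories(std::vector<int64_t> candidates,
    void (*deleteOne)(int64_t, std::vector<int64_t>&))
{
    while(!candidates.empty())
    {
        std::vector<int64_t> toExamine;
        for(std::vector<int64_t>::const_iterator i = candidates.begin();
            i != candidates.end(); ++i)
        {
            deleteOne(*i, toExamine);
        }
        // Parents that have just become empty are processed on the
        // next pass
        candidates.swap(toExamine);
    }
}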