Example #1
0
BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
	int64_t InDirectory, int64_t ObjectID, BackupStoreDirectory &rDirectory,
	const std::string &rDirectoryFilename,
	BackupStoreInfo& rBackupStoreInfo)
{
	// Find the entry inside the directory
	bool wasDeleted = false;
	bool wasOldVersion = false;
	int64_t deletedFileSizeInBlocks = 0;
	// A pointer to an object which requires committing if the directory save goes OK
	std::auto_ptr<RaidFileWrite> padjustedEntry;
	// BLOCK
	{
		BackupStoreRefCountDatabase::refcount_t refs =
			mapNewRefs->GetRefCount(ObjectID);

		BackupStoreDirectory::Entry *pentry = rDirectory.FindEntryByID(ObjectID);
		if(pentry == 0)
		{
			BOX_ERROR("Housekeeping on account " <<
				BOX_FORMAT_ACCOUNT(mAccountID) << " "
				"found error: object " <<
				BOX_FORMAT_OBJECTID(ObjectID) << " "
				"not found in dir " << 
				BOX_FORMAT_OBJECTID(InDirectory) << ", "
				"indicates logic error/corruption? Run "
				"bbstoreaccounts check <accid> fix");
			mErrorCount++;
			return refs;
		}

		// Record the flags it's got set
		wasDeleted = pentry->IsDeleted();
		wasOldVersion = pentry->IsOld();
		// Check this should be deleted
		if(!wasDeleted && !wasOldVersion)
		{
			// Things changed since we were last around
			return refs;
		}

		// Record size
		deletedFileSizeInBlocks = pentry->GetSizeInBlocks();

		if(refs > 1)
		{
			// Not safe to merge patches if someone else has a
			// reference to this object, so just remove the
			// directory entry and return.
			rDirectory.DeleteEntry(ObjectID);
			if(wasDeleted)
			{
				rBackupStoreInfo.AdjustNumDeletedFiles(-1);
			}

			if(wasOldVersion)
			{
				rBackupStoreInfo.AdjustNumOldFiles(-1);
			}

			mapNewRefs->RemoveReference(ObjectID);
			return refs - 1;
		}

		// If the entry is involved in a chain of patches, it needs to be handled
		// a bit more carefully.
		if(pentry->GetDependsNewer() != 0 && pentry->GetDependsOlder() == 0)
		{
			// This entry is a patch from a newer entry. Just need to update the info on that entry.
			BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
			if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID)
			{
				THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
			}
			// Change the info in the newer entry so that this no longer points to this entry
			pnewer->SetDependsOlder(0);
		}
		else if(pentry->GetDependsOlder() != 0)
		{
			BackupStoreDirectory::Entry *polder = rDirectory.FindEntryByID(pentry->GetDependsOlder());
			if(pentry->GetDependsNewer() == 0)
			{
				// There exists an older version which depends on this one. Need to combine the two over that one.

				// Adjust the other entry in the directory
				if(polder == 0 || polder->GetDependsNewer() != ObjectID)
				{
					THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
				}
				// Change the info in the older entry so that this no longer points to this entry
				polder->SetDependsNewer(0);
			}
			else
			{
				// This entry is in the middle of a chain, and two patches need combining.

				// First, adjust the directory entries
				BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
				if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID
					|| polder == 0 || polder->GetDependsNewer() != ObjectID)
				{
					THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
				}
				// Remove the middle entry from the linked list by simply using the values from this entry
				pnewer->SetDependsOlder(pentry->GetDependsOlder());
				polder->SetDependsNewer(pentry->GetDependsNewer());
			}

			// COMMON CODE to both cases

			// Generate the filename of the older version
			std::string objFilenameOlder;
			MakeObjectFilename(pentry->GetDependsOlder(), objFilenameOlder);
			// Open it twice (it's the diff)
			std::auto_ptr<RaidFileRead> pdiff(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
			std::auto_ptr<RaidFileRead> pdiff2(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
			// Open this file
			std::string objFilename;
			MakeObjectFilename(ObjectID, objFilename);
			std::auto_ptr<RaidFileRead> pobjectBeingDeleted(RaidFileRead::Open(mStoreDiscSet, objFilename));
			// And open a write file to overwrite the other directory entry
			padjustedEntry.reset(new RaidFileWrite(mStoreDiscSet,
				objFilenameOlder, mapNewRefs->GetRefCount(ObjectID)));
			padjustedEntry->Open(true /* allow overwriting */);

			if(pentry->GetDependsNewer() == 0)
			{
				// There exists an older version which depends on this one. Need to combine the two over that one.
				BackupStoreFile::CombineFile(*pdiff, *pdiff2, *pobjectBeingDeleted, *padjustedEntry);
			}
			else
			{
				// This entry is in the middle of a chain, and two patches need combining.
				BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, *padjustedEntry);
			}
			// The file will be committed later when the directory is safely committed.

			// Work out the adjusted size
			int64_t newSize = padjustedEntry->GetDiscUsageInBlocks();
			int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
			mBlocksUsedDelta += sizeDelta;
			if(polder->IsDeleted())
			{
				mBlocksInDeletedFilesDelta += sizeDelta;
			}
			if(polder->IsOld())
			{
				mBlocksInOldFilesDelta += sizeDelta;
			}
			polder->SetSizeInBlocks(newSize);
		}

		// pentry no longer valid
	}

	// Delete it from the directory
	rDirectory.DeleteEntry(ObjectID);

	// Save directory back to disc
	// BLOCK
	{
		RaidFileWrite writeDir(mStoreDiscSet, rDirectoryFilename,
			mapNewRefs->GetRefCount(InDirectory));
		writeDir.Open(true /* allow overwriting */);
		rDirectory.WriteToStream(writeDir);

		// Get the disc usage (must do this before committing it)
		int64_t new_size = writeDir.GetDiscUsageInBlocks();

		// Commit directory
		writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);

		// Adjust block counts if the directory itself changed in size
		int64_t original_size = rDirectory.GetUserInfo1_SizeInBlocks();
		int64_t adjust = new_size - original_size;
		mBlocksUsedDelta += adjust;
		mBlocksInDirectoriesDelta += adjust;

		UpdateDirectorySize(rDirectory, new_size);
	}

	// Commit any new adjusted entry
	if(padjustedEntry.get() != 0)
	{
		padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
		padjustedEntry.reset(); // delete it now
	}

	// Drop reference count by one. Must now be zero, to delete the file.
	bool remaining_refs = mapNewRefs->RemoveReference(ObjectID);
	ASSERT(!remaining_refs);

	// Delete from disc
	BOX_TRACE("Removing unreferenced object " <<
		BOX_FORMAT_OBJECTID(ObjectID));
	std::string objFilename;
	MakeObjectFilename(ObjectID, objFilename);
	RaidFileWrite del(mStoreDiscSet, objFilename, mapNewRefs->GetRefCount(ObjectID));
	del.Delete();

	// Adjust counts for the file
	++mFilesDeleted;
	mBlocksUsedDelta -= deletedFileSizeInBlocks;

	if(wasDeleted)
	{
		mBlocksInDeletedFilesDelta -= deletedFileSizeInBlocks;
		rBackupStoreInfo.AdjustNumDeletedFiles(-1);
	}

	if(wasOldVersion)
	{
		mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
		rBackupStoreInfo.AdjustNumOldFiles(-1);
	}

	// Delete the directory?
	// Do this if... dir has zero entries, and is marked as deleted in its containing directory
	if(rDirectory.GetNumberOfEntries() == 0)
	{
		// Candidate for deletion
		mEmptyDirectories.push_back(InDirectory);
	}

	return 0;
}
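
A note on the patch-chain handling in DeleteFile above: older file versions are held as diffs linked through the DependsNewer/DependsOlder fields of their directory entries, so removing an object from the middle of a chain means relinking its two neighbours (and combining the adjacent patch files on disc via CombineFile/CombineDiffs). The following is a minimal, self-contained sketch of just the relinking step; ToyEntry and UnlinkFromPatchChain are hypothetical names for illustration, not the real BackupStoreDirectory::Entry API.

#include <cassert>
#include <cstdint>
#include <map>
#include <stdexcept>

struct ToyEntry
{
	int64_t mDependsNewer; // 0 = not a patch against a newer version
	int64_t mDependsOlder; // 0 = no older version depends on this one
};

// Remove `id` from a chain of patch dependencies held in `dir`, mirroring the
// cases handled above: oldest end of the chain, newest end, and middle
// (where the two neighbours are joined directly).
void UnlinkFromPatchChain(std::map<int64_t, ToyEntry>& dir, int64_t id)
{
	ToyEntry& e = dir.at(id);

	if(e.mDependsNewer != 0)
	{
		ToyEntry& newer = dir.at(e.mDependsNewer);
		if(newer.mDependsOlder != id) throw std::runtime_error("bad patch chain");
		newer.mDependsOlder = e.mDependsOlder; // becomes 0 if `id` was the oldest
	}
	if(e.mDependsOlder != 0)
	{
		ToyEntry& older = dir.at(e.mDependsOlder);
		if(older.mDependsNewer != id) throw std::runtime_error("bad patch chain");
		older.mDependsNewer = e.mDependsNewer; // becomes 0 if `id` was the newest
	}

	dir.erase(id);
}

int main()
{
	// Chain: 3 (newest, complete file) <- 2 <- 1 (oldest patch)
	std::map<int64_t, ToyEntry> dir;
	dir[3] = ToyEntry{0, 2};
	dir[2] = ToyEntry{3, 1};
	dir[1] = ToyEntry{2, 0};

	UnlinkFromPatchChain(dir, 2); // delete the middle version

	assert(dir.at(3).mDependsOlder == 1);
	assert(dir.at(1).mDependsNewer == 3);
	return 0;
}
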
Example #2
0
// Count valid remaining entries and the number of blocks in them.
void BackupStoreCheck::CountDirectoryEntries(BackupStoreDirectory& dir)
{
	BackupStoreDirectory::Iterator i(dir);
	BackupStoreDirectory::Entry *en = 0;
	while((en = i.Next()) != 0)
	{
		int32_t iIndex;
		IDBlock *piBlock = LookupID(en->GetObjectID(), iIndex);
		bool badEntry = false;
		bool wasAlreadyContained = false;

		ASSERT(piBlock != 0 ||
			mDirsWhichContainLostDirs.find(en->GetObjectID())
			!= mDirsWhichContainLostDirs.end());

		if (piBlock)
		{
			// Normally it would exist and this
			// check would not be necessary, but
			// we might have missing directories
			// that we will recreate later.
			// cf mDirsWhichContainLostDirs.
			uint8_t iflags = GetFlags(piBlock, iIndex);
			wasAlreadyContained = (iflags & Flags_IsContained);
			SetFlags(piBlock, iIndex, iflags | Flags_IsContained);
		}

		if(wasAlreadyContained)
		{
			// don't double-count objects that are
			// contained by another directory as well.
		}
		else if(en->IsDir())
		{
			mNumDirectories++;
		}
		else if(!en->IsFile())
		{
			BOX_TRACE("Not counting object " <<
				BOX_FORMAT_OBJECTID(en->GetObjectID()) <<
				" with flags " << en->GetFlags());
		}
		else // it's a file
		{
			// Add to sizes?
			// If piBlock was zero, then wasAlreadyContained
			// might be uninitialized; but we only process
			// files here, and if a file's piBlock was zero
			// then badEntry would be set above, so we
			// wouldn't be here.
			ASSERT(!badEntry);

			// It can be both old and deleted.
			// If neither, then it's current.
			if(en->IsDeleted())
			{
				mNumDeletedFiles++;
				mBlocksInDeletedFiles += en->GetSizeInBlocks();
			}

			if(en->IsOld())
			{
				mNumOldFiles++;
				mBlocksInOldFiles += en->GetSizeInBlocks();
			}

			if(!en->IsDeleted() && !en->IsOld())
			{
				mNumCurrentFiles++;
				mBlocksInCurrentFiles += en->GetSizeInBlocks();
			}
		}

		mapNewRefs->AddReference(en->GetObjectID());
	}
}
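
The counting rule in CountDirectoryEntries is worth spelling out: a file entry may carry the Old and Deleted flags independently (it can be both), and only an entry with neither flag counts as current, so the three per-file counters are not mutually exclusive. Below is a small, self-contained sketch of that classification; the flag constants and the Counts struct are illustrative stand-ins, not the Box Backup types.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative flag bits (not the real BackupStoreDirectory::Entry values).
enum EntryFlags : uint16_t
{
	Flags_File    = 1,
	Flags_Old     = 2,
	Flags_Deleted = 4
};

struct Counts
{
	int64_t deletedFiles = 0, oldFiles = 0, currentFiles = 0;
};

// Classify file entries the same way as above: Old and Deleted are counted
// independently, and "current" means neither flag is set.
Counts CountFiles(const std::vector<uint16_t>& entryFlags)
{
	Counts c;
	for(uint16_t f : entryFlags)
	{
		if(!(f & Flags_File)) continue; // only files are classified here

		if(f & Flags_Deleted) c.deletedFiles++;
		if(f & Flags_Old) c.oldFiles++;
		if(!(f & Flags_Deleted) && !(f & Flags_Old)) c.currentFiles++;
	}
	return c;
}

int main()
{
	// One current file, one old file, and one that is both old and deleted.
	Counts c = CountFiles({Flags_File,
		Flags_File | Flags_Old,
		Flags_File | Flags_Old | Flags_Deleted});
	assert(c.currentFiles == 1);
	assert(c.oldFiles == 2);
	assert(c.deletedFiles == 1);
	return 0;
}
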
Example #3
0
// --------------------------------------------------------------------------
//
// Function
//		Name:    HousekeepStoreAccount::ScanDirectory(int64_t)
//		Purpose: Private. Scan a directory for potentially deletable
//			 items, and add them to the list. Returns true if the
//			 scan should continue.
//		Created: 11/12/03
//
// --------------------------------------------------------------------------
bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
	BackupStoreInfo& rBackupStoreInfo)
{
#ifndef WIN32
	if((--mCountUntilNextInterprocessMsgCheck) <= 0)
	{
		mCountUntilNextInterprocessMsgCheck =
			POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;

		// Check for having to stop
		// Include account ID here as the specified account is locked
		if(mpHousekeepingCallback && mpHousekeepingCallback->CheckForInterProcessMsg(mAccountID))
		{
			// Need to abort now
			return false;
		}
	}
#endif

	// Get the filename
	std::string objectFilename;
	MakeObjectFilename(ObjectID, objectFilename);

	// Open it.
	std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet,
		objectFilename));

	// Add the size of the directory on disc to the size being calculated
	int64_t originalDirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
	mBlocksInDirectories += originalDirSizeInBlocks;
	mBlocksUsed += originalDirSizeInBlocks;

	// Read the directory in
	BackupStoreDirectory dir;
	BufferedStream buf(*dirStream);
	dir.ReadFromStream(buf, IOStream::TimeOutInfinite);
	dir.SetUserInfo1_SizeInBlocks(originalDirSizeInBlocks);
	dirStream->Close();

	// Is it empty?
	if(dir.GetNumberOfEntries() == 0)
	{
		// Add it to the list of directories to potentially delete
		mEmptyDirectories.push_back(dir.GetObjectID());
	}

	// Calculate reference counts first, before we start requesting
	// files to be deleted.
	// BLOCK
	{
		BackupStoreDirectory::Iterator i(dir);
		BackupStoreDirectory::Entry *en = 0;

		while((en = i.Next()) != 0)
		{
			// This directory references this object
			mapNewRefs->AddReference(en->GetObjectID());
		}
	}

	// BLOCK
	{
		// Remove any files which are marked for removal as soon
		// as they become old or deleted.
		bool deletedSomething = false;
		do
		{
			// Iterate through the directory
			deletedSomething = false;
			BackupStoreDirectory::Iterator i(dir);
			BackupStoreDirectory::Entry *en = 0;
			while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
			{
				int16_t enFlags = en->GetFlags();
				if((enFlags & BackupStoreDirectory::Entry::Flags_RemoveASAP) != 0
					&& (en->IsDeleted() || en->IsOld()))
				{
					// Delete this immediately.
					DeleteFile(ObjectID, en->GetObjectID(), dir,
						objectFilename, rBackupStoreInfo);
					
					// flag as having done something
					deletedSomething = true;

					// Must start the loop from the beginning again, as iterator is now
					// probably invalid.
					break;
				}
			}
		} while(deletedSomething);
	}

	// BLOCK
	{
		// Add files to the list of potential deletions

		// map to count the distance from the mark
		typedef std::pair<std::string, int32_t> version_t;
		std::map<version_t, int32_t> markVersionAges;
			// map of pair (filename, mark number) -> version age

		// NOTE: use a reverse iterator to allow the distance from mark stuff to work
		BackupStoreDirectory::ReverseIterator i(dir);
		BackupStoreDirectory::Entry *en = 0;

		while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
		{
			// Update recalculated usage sizes
			int16_t enFlags = en->GetFlags();
			int64_t enSizeInBlocks = en->GetSizeInBlocks();
			mBlocksUsed += enSizeInBlocks;
			if(en->IsOld()) mBlocksInOldFiles += enSizeInBlocks;
			if(en->IsDeleted()) mBlocksInDeletedFiles += enSizeInBlocks;

			// Work out ages of this version from the last mark
			int32_t enVersionAge = 0;
			std::map<version_t, int32_t>::iterator enVersionAgeI(
				markVersionAges.find(
					version_t(en->GetName().GetEncodedFilename(),
						en->GetMarkNumber())));
			if(enVersionAgeI != markVersionAges.end())
			{
				enVersionAge = enVersionAgeI->second + 1;
				enVersionAgeI->second = enVersionAge;
			}
			else
			{
				markVersionAges[version_t(en->GetName().GetEncodedFilename(), en->GetMarkNumber())] = enVersionAge;
			}
			// enVersionAge is now the age of this version.

			// Potentially add it to the list if it's an old version or deleted
			if(en->IsOld() || en->IsDeleted())
			{
				// Is deleted / old version.
				DelEn d;
				d.mObjectID = en->GetObjectID();
				d.mInDirectory = ObjectID;
				d.mSizeInBlocks = en->GetSizeInBlocks();
				d.mMarkNumber = en->GetMarkNumber();
				d.mVersionAgeWithinMark = enVersionAge;
				d.mIsFlagDeleted = en->IsDeleted();

				// Add it to the list
				mPotentialDeletions.insert(d);

				// Update various counts
				mPotentialDeletionsTotalSize += d.mSizeInBlocks;
				if(d.mSizeInBlocks > mMaxSizeInPotentialDeletions) mMaxSizeInPotentialDeletions = d.mSizeInBlocks;

				// Too much in the list of potential deletions?
				// (check against the deletion target + the max size in deletions, so that we never delete things
				// and take the total size below the deletion size target)
				if(mPotentialDeletionsTotalSize > (mDeletionSizeTarget + mMaxSizeInPotentialDeletions))
				{
					int64_t sizeToRemove = mPotentialDeletionsTotalSize - (mDeletionSizeTarget + mMaxSizeInPotentialDeletions);
					bool recalcMaxSize = false;

					while(sizeToRemove > 0)
					{
						// Make iterator for the last element, while checking that there's something there in the first place.
						std::set<DelEn, DelEnCompare>::iterator i(mPotentialDeletions.end());
						if(i == mPotentialDeletions.begin())
						{
							// Nothing left in set
							break;
						}
						// Make this into an iterator pointing to the last element in the set
						--i;

						// Delete this one?
						if(sizeToRemove > i->mSizeInBlocks)
						{
							sizeToRemove -= i->mSizeInBlocks;
							if(i->mSizeInBlocks >= mMaxSizeInPotentialDeletions)
							{
								// Will need to recalculate the maximum size now, because we've just deleted that element
								recalcMaxSize = true;
							}
							mPotentialDeletions.erase(i);
						}
						else
						{
							// Over the size to remove, so stop now
							break;
						}
					}

					if(recalcMaxSize)
					{
						// Because an object which was the maximum size recorded was deleted from the set
						// it's necessary to recalculate this maximum.
						mMaxSizeInPotentialDeletions = 0;
						std::set<DelEn, DelEnCompare>::const_iterator i(mPotentialDeletions.begin());
						for(; i != mPotentialDeletions.end(); ++i)
						{
							if(i->mSizeInBlocks > mMaxSizeInPotentialDeletions)
							{
								mMaxSizeInPotentialDeletions = i->mSizeInBlocks;
							}
						}
					}
				}
			}
		}
	}

	// Recurse into subdirectories
	{
		BackupStoreDirectory::Iterator i(dir);
		BackupStoreDirectory::Entry *en = 0;
		while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir)) != 0)
		{
			ASSERT(en->IsDir());
			
			if(!ScanDirectory(en->GetObjectID(), rBackupStoreInfo))
			{
				// Halting operation
				return false;
			}
		}
	}

	return true;
}
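
The pruning logic near the end of ScanDirectory keeps the set of deletion candidates bounded: whenever their total size exceeds the deletion target plus the size of the largest single candidate, the lowest-priority entries are dropped from the back of the ordered set, stopping before the total could fall below the target, and the cached maximum is recomputed if the element that defined it was removed. The sketch below reproduces that rule over a simplified candidate type; Candidate, Trim and the explicit totalSize/maxSize parameters are illustrative assumptions, not the HousekeepStoreAccount members.

#include <cassert>
#include <cstdint>
#include <set>

struct Candidate
{
	int64_t priority;     // lower value = delete first
	int64_t sizeInBlocks;
	bool operator<(const Candidate& rhs) const { return priority < rhs.priority; }
};

// Drop the lowest-priority candidates (the back of the ordered set) while the
// total size is more than target + maxSize, stopping rather than overshooting,
// and recompute maxSize if the element that defined it was removed.
void Trim(std::multiset<Candidate>& cands, int64_t& totalSize,
	int64_t& maxSize, int64_t target)
{
	if(totalSize <= target + maxSize) return;

	int64_t sizeToRemove = totalSize - (target + maxSize);
	bool recalcMax = false;

	while(sizeToRemove > 0 && !cands.empty())
	{
		std::multiset<Candidate>::iterator last = cands.end();
		--last; // lowest-priority candidate

		if(sizeToRemove <= last->sizeInBlocks) break; // removing it would overshoot

		sizeToRemove -= last->sizeInBlocks;
		totalSize -= last->sizeInBlocks;
		if(last->sizeInBlocks >= maxSize) recalcMax = true;
		cands.erase(last);
	}

	if(recalcMax)
	{
		maxSize = 0;
		for(const Candidate& c : cands)
		{
			if(c.sizeInBlocks > maxSize) maxSize = c.sizeInBlocks;
		}
	}
}

int main()
{
	std::multiset<Candidate> cands{{1, 10}, {2, 40}, {3, 5}, {4, 20}};
	int64_t total = 75, maxSize = 40;
	Trim(cands, total, maxSize, 10 /* target, in blocks */);
	assert(cands.size() == 3); // only the priority-4 candidate was dropped
	assert(total == 55);
	return 0;
}
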