// Synchronously collects file reference counts from the QuotaManager's IO
// thread.  Must be called on the main thread.
//
// Dispatches |this| to the IO thread (where the counts are presumably
// gathered by this runnable's Run() method — not visible in this file) and
// then blocks on mCondVar until the IO thread clears mWaiting and notifies.
// The results are copied out of the member variables after the wait.
//
// @param aMemRefCnt   receives mMemRefCnt computed on the IO thread.
// @param aDBRefCnt    receives mDBRefCnt.
// @param aSliceRefCnt receives mSliceRefCnt.
// @param aResult      receives mResult.
// @return NS_OK, or the dispatch failure code.
nsresult GetFileReferencesHelper::DispatchAndReturnFileReferences(int32_t* aMemRefCnt, int32_t* aDBRefCnt, int32_t* aSliceRefCnt, bool* aResult) {
  NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");

  QuotaManager* quotaManager = QuotaManager::Get();
  NS_ASSERTION(quotaManager, "Shouldn't be null!");

  nsresult rv = quotaManager->IOThread()->Dispatch(this, NS_DISPATCH_NORMAL);
  NS_ENSURE_SUCCESS(rv, rv);

  // Block until the IO thread signals completion.  Waiting in a loop guards
  // against spurious wakeups; mWaiting is expected to be flipped under mMutex
  // by the IO thread once the counts have been recorded.
  mozilla::MutexAutoLock autolock(mMutex);
  while (mWaiting) {
    mCondVar.Wait();
  }

  *aMemRefCnt = mMemRefCnt;
  *aDBRefCnt = mDBRefCnt;
  *aSliceRefCnt = mSliceRefCnt;
  *aResult = mResult;
  return NS_OK;
}
// Prepares the cache file that the write described by mWriteParams will be
// stored in.  Runs on the QuotaManager IO thread in state
// eReadyToReadMetadata with mOpenMode == eOpenForWrite.
//
// Steps: reuse the module index of the oldest (last) metadata LRU entry,
// create a QuotaObject for the target file, reserve mWriteParams.mSize bytes
// of quota (evicting other cache entries and retrying once if the first
// reservation fails), open the file truncated for read/write, then move this
// entry to the front of the LRU queue and persist the metadata file.
nsresult ParentRunnable::OpenCacheFileForWrite() {
  AssertIsOnIOThread();
  MOZ_ASSERT(mState == eReadyToReadMetadata);
  MOZ_ASSERT(mOpenMode == eOpenForWrite);

  mFileSize = mWriteParams.mSize;

  // Kick out the oldest entry in the LRU queue in the metadata.
  mModuleIndex = mMetadata.mEntries[Metadata::kLastEntry].mModuleIndex;

  nsCOMPtr<nsIFile> file;
  nsresult rv = GetCacheFile(mDirectory, mModuleIndex, getter_AddRefs(file));
  NS_ENSURE_SUCCESS(rv, rv);

  QuotaManager* qm = QuotaManager::Get();
  MOZ_ASSERT(qm, "We are on the QuotaManager's IO thread");

  // Create the QuotaObject before all file IO and keep it alive until caching
  // completes to get maximum assertion coverage in QuotaManager against
  // concurrent removal, etc.
  mQuotaObject = qm->GetQuotaObject(quota::PERSISTENCE_TYPE_TEMPORARY, mGroup, mOrigin, file);
  NS_ENSURE_STATE(mQuotaObject);

  if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize, /* aTruncate */ false)) {
    // If the request fails, it might be because mOrigin is using too much
    // space (MaybeUpdateSize will not evict our own origin since it is
    // active). Try to make some space by evicting LRU entries until there is
    // enough space.
    EvictEntries(mDirectory, mGroup, mOrigin, mWriteParams.mSize, mMetadata);
    if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize, /* aTruncate */ false)) {
      // Still over quota after eviction: surface the quota error to the child.
      mResult = JS::AsmJSCache_QuotaExceeded;
      return NS_ERROR_FAILURE;
    }
  }

  // Open (creating/truncating as needed) with mode 0644.
  int32_t openFlags = PR_RDWR | PR_TRUNCATE | PR_CREATE_FILE;
  rv = file->OpenNSPRFileDesc(openFlags, 0644, &mFileDesc);
  NS_ENSURE_SUCCESS(rv, rv);

  // Move the mModuleIndex's LRU entry to the recent end of the queue.
  PodMove(mMetadata.mEntries + 1, mMetadata.mEntries, Metadata::kLastEntry);
  Metadata::Entry& entry = mMetadata.mEntries[0];
  entry.mFastHash = mWriteParams.mFastHash;
  entry.mNumChars = mWriteParams.mNumChars;
  entry.mFullHash = mWriteParams.mFullHash;
  entry.mModuleIndex = mModuleIndex;

  rv = WriteMetadataFile(mMetadataFile, mMetadata);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
// Attempts to grow this quota object's tracked size so that a write of
// aCount bytes at aOffset (i.e. up to byte offset |end|) fits within the
// origin's quota limit.  Returns true if the space is (or becomes)
// available; false if the origin is over its limit and the quota was not
// lifted.  Acquires quotaManager->mQuotaMutex for the duration of the call.
bool QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount) {
  int64_t end = aOffset + aCount;

  QuotaManager* quotaManager = QuotaManager::Get();
  NS_ASSERTION(quotaManager, "Shouldn't be null!");

  MutexAutoLock lock(quotaManager->mQuotaMutex);

  // Already tracking at least |end| bytes, or there is no origin info (no
  // quota being enforced) — nothing to allocate.
  if (mSize >= end || !mOriginInfo) {
    return true;
  }

  int64_t newUsage = mOriginInfo->mUsage - mSize + end;
  if (newUsage > mOriginInfo->mLimit) {
    // This will block the thread, but it will also drop the mutex while
    // waiting. The mutex will be reacquired again when the waiting is finished.
    if (!quotaManager->LockedQuotaIsLifted()) {
      return false;
    }

    // Threads raced, the origin info removal has been done by some other
    // thread.
    if (!mOriginInfo) {
      // The other thread could allocate more space.
      if (end > mSize) {
        mSize = end;
      }
      return true;
    }

    // The quota was lifted: drop all quota tracking for this origin.
    nsCString origin = mOriginInfo->mOrigin;
    mOriginInfo->LockedClearOriginInfos();
    NS_ASSERTION(!mOriginInfo, "Should have cleared in LockedClearOriginInfos!");
    quotaManager->mOriginInfos.Remove(origin);

    // Some other thread could increase the size without blocking (increasing
    // the origin usage without hitting the limit), but no more than this one.
    NS_ASSERTION(mSize < end, "This shouldn't happen!");

    mSize = end;
    return true;
  }

  // Within the limit: account the growth and record the new size.
  mOriginInfo->mUsage = newUsage;
  mSize = end;
  return true;
}
// Aborts all quota operations belonging to mContentParentId on the
// background thread.  A no-op during shutdown or when the QuotaManager
// singleton no longer exists; always reports success.
NS_IMETHODIMP
AbortOperationsRunnable::Run()
{
  AssertIsOnBackgroundThread();

  if (!QuotaManager::IsShuttingDown()) {
    if (QuotaManager* qm = QuotaManager::Get()) {
      qm->AbortOperationsForProcess(mContentParentId);
    }
  }

  return NS_OK;
}
void DispatchToIOThread() { AssertIsOnOwningThread(); // If shutdown just started, the QuotaManager may have been deleted. QuotaManager* qm = QuotaManager::Get(); if (!qm) { FailOnNonOwningThread(); return; } nsresult rv = qm->IOThread()->Dispatch(this, NS_DISPATCH_NORMAL); if (NS_FAILED(rv)) { FailOnNonOwningThread(); return; } }
// Opens an existing cache file for reading (a cache hit).  Runs on the
// QuotaManager IO thread in state eReadyToOpenCacheFileForRead with
// mModuleIndex already resolved.  Records the file size, opens the file
// read-only, moves this module's entry to the front of the metadata LRU
// queue, and persists the updated metadata.
nsresult ParentRunnable::OpenCacheFileForRead() {
  AssertIsOnIOThread();
  MOZ_ASSERT(mState == eReadyToOpenCacheFileForRead);
  MOZ_ASSERT(mOpenMode == eOpenForRead);

  nsCOMPtr<nsIFile> file;
  nsresult rv = GetCacheFile(mDirectory, mModuleIndex, getter_AddRefs(file));
  NS_ENSURE_SUCCESS(rv, rv);

  QuotaManager* qm = QuotaManager::Get();
  MOZ_ASSERT(qm, "We are on the QuotaManager's IO thread");

  // Even though it's not strictly necessary, create the QuotaObject before all
  // file IO and keep it alive until caching completes to get maximum assertion
  // coverage in QuotaManager against concurrent removal, etc.
  mQuotaObject = qm->GetQuotaObject(quota::PERSISTENCE_TYPE_TEMPORARY, mGroup, mOrigin, file);
  NS_ENSURE_STATE(mQuotaObject);

  rv = file->GetFileSize(&mFileSize);
  NS_ENSURE_SUCCESS(rv, rv);

  // Read-only open; OS_READAHEAD hints the OS to prefetch the contents.
  int32_t openFlags = PR_RDONLY | nsIFile::OS_READAHEAD;
  rv = file->OpenNSPRFileDesc(openFlags, 0644, &mFileDesc);
  NS_ENSURE_SUCCESS(rv, rv);

  // Move the mModuleIndex's LRU entry to the recent end of the queue.
  // First locate the entry for mModuleIndex; not finding it means the
  // metadata is inconsistent.
  unsigned lruIndex = 0;
  while (mMetadata.mEntries[lruIndex].mModuleIndex != mModuleIndex) {
    if (++lruIndex == Metadata::kNumEntries) {
      return NS_ERROR_UNEXPECTED;
    }
  }
  // Shift entries [0, lruIndex) down one slot and put this entry at front.
  Metadata::Entry entry = mMetadata.mEntries[lruIndex];
  PodMove(mMetadata.mEntries + 1, mMetadata.mEntries, lruIndex);
  mMetadata.mEntries[0] = entry;

  rv = WriteMetadataFile(mMetadataFile, mMetadata);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
// Resolves the IndexedDB subdirectory for aOrigin: takes the origin's quota
// directory from the QuotaManager and appends the IDB directory name.
//
// @param aOrigin     origin whose directory is requested.
// @param aDirectory  out parameter; receives the resulting nsIFile.
nsresult
Client::GetDirectory(const nsACString& aOrigin, nsIFile** aDirectory)
{
  QuotaManager* quotaManager = QuotaManager::Get();
  NS_ASSERTION(quotaManager, "This should never fail!");

  nsCOMPtr<nsIFile> originDir;
  nsresult rv =
    quotaManager->GetDirectoryForOrigin(aOrigin, getter_AddRefs(originDir));
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(originDir, "What?");

  // Descend into the IndexedDB-specific subdirectory.
  rv = originDir->Append(NS_LITERAL_STRING(IDB_DIRECTORY_NAME));
  NS_ENSURE_SUCCESS(rv, rv);

  originDir.forget(aDirectory);
  return NS_OK;
}
// Opens the underlying file stream with quota tracking.  The QuotaObject is
// created before the actual open so the IO is accounted for, and if the open
// truncated the file (PR_TRUNCATE), the quota size is reset to zero.
nsresult FileQuotaStream<FileStreamBase>::DoOpen() {
  QuotaManager* quotaManager = QuotaManager::Get();
  NS_ASSERTION(quotaManager, "Shouldn't be null!");
  NS_ASSERTION(!mQuotaObject, "Creating quota object more than once?");

  // Acquire the quota object up front so the open below is tracked.
  mQuotaObject =
    quotaManager->GetQuotaObject(mPersistenceType, mGroup, mOrigin,
                                 FileStreamBase::mOpenParams.localFile);

  nsresult rv = FileStreamBase::DoOpen();
  NS_ENSURE_SUCCESS(rv, rv);

  if (mQuotaObject) {
    // A PR_TRUNCATE open just dropped the file to zero bytes; reflect that
    // in the quota accounting.
    if (FileStreamBase::mOpenParams.ioFlags & PR_TRUNCATE) {
      mQuotaObject->MaybeUpdateSize(0, /* aTruncate */ true);
    }
  }

  return NS_OK;
}
// DirectoryLock callback: stores the acquired lock, advances the state
// machine, and dispatches this runnable to the QuotaManager's IO thread to
// perform the actual file deletion.  Calls Finish() if dispatch fails.
void
DeleteFilesRunnable::DirectoryLockAcquired(DirectoryLock* aLock)
{
  AssertIsOnBackgroundThread();
  MOZ_ASSERT(mState == State_DirectoryOpenPending);
  MOZ_ASSERT(!mDirectoryLock);

  mDirectoryLock = aLock;

  QuotaManager* qm = QuotaManager::Get();
  MOZ_ASSERT(qm);

  // Must set this before dispatching otherwise we will race with the IO thread
  mState = State_DatabaseWorkOpen;

  if (NS_WARN_IF(NS_FAILED(qm->IOThread()->Dispatch(this, NS_DISPATCH_NORMAL)))) {
    Finish();
  }
}
// Starts the deletion state machine: asks the QuotaManager to open (lock)
// the origin's directory non-exclusively.  The DirectoryLockAcquired
// callback continues the work once the lock is granted.
nsresult
DeleteFilesRunnable::Open()
{
  AssertIsOnBackgroundThread();
  MOZ_ASSERT(mState == State_Initial);

  QuotaManager* qm = QuotaManager::Get();
  if (NS_WARN_IF(!qm)) {
    return NS_ERROR_FAILURE;
  }

  mState = State_DirectoryOpenPending;

  qm->OpenDirectory(mFileManager->Type(),
                    mFileManager->Group(),
                    mFileManager->Origin(),
                    mFileManager->IsApp(),
                    Client::IDB,
                    /* aExclusive */ false,
                    this);

  return NS_OK;
}
// Removes the stored file identified by aFileId together with its journal
// file.  When quota is being enforced, the file's size is measured before
// removal and the origin's usage is decreased by that amount afterwards.
nsresult
DeleteFilesRunnable::DeleteFile(int64_t aFileId)
{
  MOZ_ASSERT(mDirectory);
  MOZ_ASSERT(mJournalDirectory);

  nsCOMPtr<nsIFile> storedFile = mFileManager->GetFileForId(mDirectory, aFileId);
  NS_ENSURE_TRUE(storedFile, NS_ERROR_FAILURE);

  nsresult rv;
  int64_t bytesFreed;
  if (mFileManager->EnforcingQuota()) {
    // Capture the size now; the file is gone after Remove().
    rv = storedFile->GetFileSize(&bytesFreed);
    NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
  }

  rv = storedFile->Remove(false);
  NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

  if (mFileManager->EnforcingQuota()) {
    QuotaManager* quotaManager = QuotaManager::Get();
    NS_ASSERTION(quotaManager, "Shouldn't be null!");

    quotaManager->DecreaseUsageForOrigin(mFileManager->Type(),
                                         mFileManager->Group(),
                                         mFileManager->Origin(),
                                         bytesFreed);
  }

  // The journal file is not quota-tracked; just remove it.
  nsCOMPtr<nsIFile> journalFile =
    mFileManager->GetFileForId(mJournalDirectory, aFileId);
  NS_ENSURE_TRUE(journalFile, NS_ERROR_FAILURE);

  rv = journalFile->Remove(false);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
bool QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount) { int64_t end = aOffset + aCount; QuotaManager* quotaManager = QuotaManager::Get(); NS_ASSERTION(quotaManager, "Shouldn't be null!"); MutexAutoLock lock(quotaManager->mQuotaMutex); if (mSize >= end || !mOriginInfo) { return true; } GroupInfo* groupInfo = mOriginInfo->mGroupInfo; if (groupInfo->IsForPersistentStorage()) { uint64_t newUsage = mOriginInfo->mUsage - mSize + end; if (newUsage > mOriginInfo->mLimit) { // This will block the thread, but it will also drop the mutex while // waiting. The mutex will be reacquired again when the waiting is // finished. if (!quotaManager->LockedQuotaIsLifted()) { return false; } // Threads raced, the origin info removal has been done by some other // thread. if (!mOriginInfo) { // The other thread could allocate more space. if (end > mSize) { mSize = end; } return true; } nsCString group = mOriginInfo->mGroupInfo->mGroup; nsCString origin = mOriginInfo->mOrigin; mOriginInfo->LockedClearOriginInfos(); NS_ASSERTION(!mOriginInfo, "Should have cleared in LockedClearOriginInfos!"); quotaManager->LockedRemoveQuotaForOrigin(PERSISTENCE_TYPE_PERSISTENT, group, origin); // Some other thread could increase the size without blocking (increasing // the origin usage without hitting the limit), but no more than this one. NS_ASSERTION(mSize < end, "This shouldn't happen!"); mSize = end; return true; } mOriginInfo->mUsage = newUsage; groupInfo->mUsage = groupInfo->mUsage - mSize + end; mSize = end; return true; } NS_ASSERTION(groupInfo->mPersistenceType == PERSISTENCE_TYPE_TEMPORARY, "Huh?"); uint64_t delta = end - mSize; uint64_t newUsage = mOriginInfo->mUsage + delta; // Temporary storage has no limit for origin usage (there's a group and the // global limit though). uint64_t newGroupUsage = groupInfo->mUsage + delta; // Temporary storage has a hard limit for group usage (20 % of the global // limit). 
if (newGroupUsage > quotaManager->GetGroupLimit()) { return false; } uint64_t newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage + delta; if (newTemporaryStorageUsage > quotaManager->mTemporaryStorageLimit) { // This will block the thread without holding the lock while waitting. nsAutoTArray<OriginInfo*, 10> originInfos; uint64_t sizeToBeFreed = quotaManager->LockedCollectOriginsForEviction(delta, originInfos); if (!sizeToBeFreed) { return false; } NS_ASSERTION(sizeToBeFreed >= delta, "Huh?"); { MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); for (uint32_t i = 0; i < originInfos.Length(); i++) { quotaManager->DeleteTemporaryFilesForOrigin(originInfos[i]->mOrigin); } } // Relocked. NS_ASSERTION(mOriginInfo, "How come?!"); nsTArray<nsCString> origins; for (uint32_t i = 0; i < originInfos.Length(); i++) { OriginInfo* originInfo = originInfos[i]; NS_ASSERTION(originInfo != mOriginInfo, "Deleted itself!"); nsCString group = originInfo->mGroupInfo->mGroup; nsCString origin = originInfo->mOrigin; quotaManager->LockedRemoveQuotaForOrigin(PERSISTENCE_TYPE_TEMPORARY, group, origin); #ifdef DEBUG originInfos[i] = nullptr; #endif origins.AppendElement(origin); } // We unlocked and relocked several times so we need to recompute all the // essential variables and recheck the group limit. delta = end - mSize; newUsage = mOriginInfo->mUsage + delta; newGroupUsage = groupInfo->mUsage + delta; if (newGroupUsage > quotaManager->GetGroupLimit()) { // Unfortunately some other thread increased the group usage in the // meantime and we are not below the group limit anymore. // However, the origin eviction must be finalized in this case too. 
MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); quotaManager->FinalizeOriginEviction(origins); return false; } newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage + delta; NS_ASSERTION(newTemporaryStorageUsage <= quotaManager->mTemporaryStorageLimit, "How come?!"); // Ok, we successfully freed enough space and the operation can continue // without throwing the quota error. mOriginInfo->mUsage = newUsage; groupInfo->mUsage = newGroupUsage; quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage;; // Some other thread could increase the size in the meantime, but no more // than this one. NS_ASSERTION(mSize < end, "This shouldn't happen!"); mSize = end; // Finally, release IO thread only objects and allow next synchronized // ops for the evicted origins. MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); quotaManager->FinalizeOriginEviction(origins); return true; } mOriginInfo->mUsage = newUsage; groupInfo->mUsage = newGroupUsage; quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage; mSize = end; return true; }
bool QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate) { QuotaManager* quotaManager = QuotaManager::Get(); MOZ_ASSERT(quotaManager); MutexAutoLock lock(quotaManager->mQuotaMutex); if (mSize == aSize) { return true; } if (!mOriginInfo) { mSize = aSize; return true; } GroupInfo* groupInfo = mOriginInfo->mGroupInfo; MOZ_ASSERT(groupInfo); if (mSize > aSize) { if (aTruncate) { const int64_t delta = mSize - aSize; AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, delta); quotaManager->mTemporaryStorageUsage -= delta; AssertNoUnderflow(groupInfo->mUsage, delta); groupInfo->mUsage -= delta; AssertNoUnderflow(mOriginInfo->mUsage, delta); mOriginInfo->mUsage -= delta; mSize = aSize; } return true; } MOZ_ASSERT(mSize < aSize); nsRefPtr<GroupInfo> complementaryGroupInfo = groupInfo->mGroupInfoPair->LockedGetGroupInfo( ComplementaryPersistenceType(groupInfo->mPersistenceType)); uint64_t delta = aSize - mSize; AssertNoOverflow(mOriginInfo->mUsage, delta); uint64_t newUsage = mOriginInfo->mUsage + delta; // Temporary storage has no limit for origin usage (there's a group and the // global limit though). AssertNoOverflow(groupInfo->mUsage, delta); uint64_t newGroupUsage = groupInfo->mUsage + delta; uint64_t groupUsage = groupInfo->mUsage; if (complementaryGroupInfo) { AssertNoOverflow(groupUsage, complementaryGroupInfo->mUsage); groupUsage += complementaryGroupInfo->mUsage; } // Temporary storage has a hard limit for group usage (20 % of the global // limit). AssertNoOverflow(groupUsage, delta); if (groupUsage + delta > quotaManager->GetGroupLimit()) { return false; } AssertNoOverflow(quotaManager->mTemporaryStorageUsage, delta); uint64_t newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage + delta; if (newTemporaryStorageUsage > quotaManager->mTemporaryStorageLimit) { // This will block the thread without holding the lock while waitting. 
nsAutoTArray<OriginInfo*, 10> originInfos; uint64_t sizeToBeFreed = quotaManager->LockedCollectOriginsForEviction(delta, originInfos); if (!sizeToBeFreed) { return false; } NS_ASSERTION(sizeToBeFreed >= delta, "Huh?"); { MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); for (uint32_t i = 0; i < originInfos.Length(); i++) { OriginInfo* originInfo = originInfos[i]; quotaManager->DeleteFilesForOrigin( originInfo->mGroupInfo->mPersistenceType, originInfo->mOrigin); } } // Relocked. NS_ASSERTION(mOriginInfo, "How come?!"); nsTArray<OriginParams> origins; for (uint32_t i = 0; i < originInfos.Length(); i++) { OriginInfo* originInfo = originInfos[i]; NS_ASSERTION(originInfo != mOriginInfo, "Deleted itself!"); PersistenceType persistenceType = originInfo->mGroupInfo->mPersistenceType; nsCString group = originInfo->mGroupInfo->mGroup; nsCString origin = originInfo->mOrigin; bool isApp = originInfo->mIsApp; quotaManager->LockedRemoveQuotaForOrigin(persistenceType, group, origin); #ifdef DEBUG originInfos[i] = nullptr; #endif origins.AppendElement(OriginParams(persistenceType, origin, isApp)); } // We unlocked and relocked several times so we need to recompute all the // essential variables and recheck the group limit. AssertNoUnderflow(aSize, mSize); delta = aSize - mSize; AssertNoOverflow(mOriginInfo->mUsage, delta); newUsage = mOriginInfo->mUsage + delta; AssertNoOverflow(groupInfo->mUsage, delta); newGroupUsage = groupInfo->mUsage + delta; groupUsage = groupInfo->mUsage; if (complementaryGroupInfo) { AssertNoOverflow(groupUsage, complementaryGroupInfo->mUsage); groupUsage += complementaryGroupInfo->mUsage; } AssertNoOverflow(groupUsage, delta); if (groupUsage + delta > quotaManager->GetGroupLimit()) { // Unfortunately some other thread increased the group usage in the // meantime and we are not below the group limit anymore. // However, the origin eviction must be finalized in this case too. 
MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); quotaManager->FinalizeOriginEviction(origins); return false; } AssertNoOverflow(quotaManager->mTemporaryStorageUsage, delta); newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage + delta; NS_ASSERTION(newTemporaryStorageUsage <= quotaManager->mTemporaryStorageLimit, "How come?!"); // Ok, we successfully freed enough space and the operation can continue // without throwing the quota error. mOriginInfo->mUsage = newUsage; groupInfo->mUsage = newGroupUsage; quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage;; // Some other thread could increase the size in the meantime, but no more // than this one. MOZ_ASSERT(mSize < aSize); mSize = aSize; // Finally, release IO thread only objects and allow next synchronized // ops for the evicted origins. MutexAutoUnlock autoUnlock(quotaManager->mQuotaMutex); quotaManager->FinalizeOriginEviction(origins); return true; } mOriginInfo->mUsage = newUsage; groupInfo->mUsage = newGroupUsage; quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage; mSize = aSize; return true; }
// Reads (or initializes) the asm.js cache metadata for this origin.  Runs on
// the QuotaManager IO thread in state eReadyToReadMetadata.
//
// Ensures the origin's storage is initialized, descends into the asm.js
// cache subdirectory (creating it if needed), and loads the metadata file.
// If the metadata file is missing or unreadable: a read request fails with
// NS_ERROR_FILE_NOT_FOUND (no cache hit is possible), while a write request
// proceeds with a freshly initialized, empty LRU table.
nsresult ParentRunnable::ReadMetadata() {
  AssertIsOnIOThread();
  MOZ_ASSERT(mState == eReadyToReadMetadata);

  QuotaManager* qm = QuotaManager::Get();
  MOZ_ASSERT(qm, "We are on the QuotaManager's IO thread");

  nsresult rv = qm->EnsureOriginIsInitialized(quota::PERSISTENCE_TYPE_TEMPORARY, mSuffix, mGroup, mOrigin, getter_AddRefs(mDirectory));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // Report storage-init failure back to the requesting child.
    mResult = JS::AsmJSCache_StorageInitFailure;
    return rv;
  }

  rv = mDirectory->Append(NS_LITERAL_STRING(ASMJSCACHE_DIRECTORY_NAME));
  NS_ENSURE_SUCCESS(rv, rv);

  bool exists;
  rv = mDirectory->Exists(&exists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!exists) {
    rv = mDirectory->Create(nsIFile::DIRECTORY_TYPE, 0755);
    NS_ENSURE_SUCCESS(rv, rv);
  } else {
    // Debug-only sanity check: the existing path must be a directory.
    DebugOnly<bool> isDirectory;
    MOZ_ASSERT(NS_SUCCEEDED(mDirectory->IsDirectory(&isDirectory)));
    MOZ_ASSERT(isDirectory, "Should have caught this earlier!");
  }

  rv = mDirectory->Clone(getter_AddRefs(mMetadataFile));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mMetadataFile->Append(NS_LITERAL_STRING(ASMJSCACHE_METADATA_FILE_NAME));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mMetadataFile->Exists(&exists);
  NS_ENSURE_SUCCESS(rv, rv);

  // Treat an unreadable/corrupt metadata file the same as a missing one.
  if (exists && NS_FAILED(ReadMetadataFile(mMetadataFile, mMetadata))) {
    exists = false;
  }

  if (!exists) {
    // If we are reading, we can't possibly have a cache hit.
    if (mOpenMode == eOpenForRead) {
      return NS_ERROR_FILE_NOT_FOUND;
    }

    // Initialize Metadata with a valid empty state for the LRU cache.
    for (unsigned i = 0; i < Metadata::kNumEntries; i++) {
      Metadata::Entry& entry = mMetadata.mEntries[i];
      entry.mModuleIndex = i;
      entry.clear();
    }
  }

  return NS_OK;
}