// Pads the chunk at aChunkIdx with zeroes: either fills the tail of the last
// incomplete chunk or creates a brand new all-zero chunk. The caller must
// hold the lock (AssertOwnsLock below).
nsresult
CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
{
  AssertOwnsLock();

  // This method is used to pad last incomplete chunk with zeroes or create
  // a new chunk full of zeroes
  // aChunkIdx must be exactly the chunk that contains (or starts at) the
  // current end of data.
  MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);

  nsresult rv;
  nsRefPtr<CacheFileChunk> chunk;
  // aWriter=true, no callback: get the chunk immediately for writing.
  rv = GetChunkLocked(aChunkIdx, true, nullptr, getter_AddRefs(chunk));
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
       " [this=%p]", aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));

  // Zero the region between the chunk's current data end and the full chunk
  // size. If the chunk is already full this memsets zero bytes — harmless.
  chunk->EnsureBufSize(kChunkSize);
  memset(chunk->BufForWriting() + chunk->DataSize(), 0,
         kChunkSize - chunk->DataSize());

  // Record the newly written range (offset = old DataSize, length = the gap).
  // NOTE(review): presumably this also advances mDataSize and notifies
  // waiting listeners — confirm against CacheFileChunk::UpdateDataSize.
  chunk->UpdateDataSize(chunk->DataSize(), kChunkSize - chunk->DataSize(),
                        false);

  // Transfer our reference to mObjsToRelease so it is dropped after the lock
  // is released (see ReleaseOutsideLock).
  ReleaseOutsideLock(chunk.forget().get());

  return NS_OK;
}
// Queues aObject so that its reference is dropped only after the lock has
// been released. Callers pass an already-AddRef'ed pointer (typically via
// nsRefPtr::forget().get()); this method takes over that reference by
// storing the raw pointer in mObjsToRelease. Must be called with the lock
// held. NOTE(review): releasing outside the lock presumably avoids
// re-entering CacheFile code (e.g. chunk destructors) while locked — confirm
// where mObjsToRelease is drained.
void
CacheFile::ReleaseOutsideLock(nsISupports *aObject)
{
  AssertOwnsLock();

  mObjsToRelease.AppendElement(aObject);
}
// Delivers aResult/aChunk to every listener queued for chunk aIndex, frees
// the listener items and drops the queue entry. Returns NS_OK, or the first
// notification failure if any occurred. Lock must be held.
nsresult
CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
                                CacheFileChunk *aChunk)
{
  LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%d, rv=0x%08x, "
       "chunk=%p]", this, aIndex, aResult, aChunk));

  AssertOwnsLock();

  ChunkListeners *listeners;
  mChunkListeners.Get(aIndex, &listeners);
  MOZ_ASSERT(listeners);

  // Notify every queued item; remember only the first failure but keep going
  // so that all items are notified and deleted.
  nsresult firstFailure = NS_OK;
  for (uint32_t idx = 0; idx < listeners->mItems.Length(); idx++) {
    ChunkListenerItem *item = listeners->mItems[idx];
    nsresult notifyRv = NotifyChunkListener(item->mCallback, item->mTarget,
                                            aResult, aIndex, aChunk);
    if (NS_SUCCEEDED(firstFailure) && NS_FAILED(notifyRv)) {
      firstFailure = notifyRv;
    }
    delete item;
  }

  mChunkListeners.Remove(aIndex);

  return firstFailure;
}
// Detaches aOutput from this file. A second call for an already-removed
// output stream is a harmless no-op. Lock must be held.
nsresult
CacheFile::RemoveOutput(CacheFileOutputStream *aOutput)
{
  AssertOwnsLock();

  LOG(("CacheFile::RemoveOutput() [this=%p, output=%p]", this, aOutput));

  // Ignore stale removal requests for a stream that is no longer ours.
  if (mOutput != aOutput) {
    LOG(("CacheFile::RemoveOutput() - This output was already removed, ignoring"
         " call [this=%p]", this));
    return NS_OK;
  }

  mOutput = nullptr;

  // Cancel all queued chunk and update listeners that cannot be satisfied
  NotifyListenersAboutOutputRemoval();

  // Disk-backed entries may now be quiescent enough to flush their metadata.
  if (!mMemoryOnly) {
    WriteMetadataIfNeededLocked();
  }

  // Notify close listener as the last action
  aOutput->NotifyCloseListener();

  return NS_OK;
}
void CacheFile::PostWriteTimer() { LOG(("CacheFile::PostWriteTimer() [this=%p]", this)); nsresult rv; AssertOwnsLock(); if (mTimer) { if (mTimer->ShouldFireNew()) { LOG(("CacheFile::PostWriteTimer() - Canceling old timer [this=%p]", this)); mTimer->Cancel(); mTimer = nullptr; } else { LOG(("CacheFile::PostWriteTimer() - Keeping old timer [this=%p]", this)); return; } } mTimer = new MetadataWriteTimer(this); rv = mTimer->Fire(); if (NS_FAILED(rv)) { LOG(("CacheFile::PostWriteTimer() - Firing timer failed with error 0x%08x " "[this=%p]", rv, this)); } }
// Fails all listeners that depended on the (now removed) output stream:
// chunk listeners waiting for chunks the writer will never create, and
// update listeners on existing chunks. Lock must be held.
void
CacheFile::NotifyListenersAboutOutputRemoval()
{
  LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));

  AssertOwnsLock();

  // First fail all chunk listeners that wait for non-existent chunk
  mChunkListeners.Enumerate(&CacheFile::FailListenersIfNonExistentChunk,
                            this);

  // Fail all update listeners
  mChunks.Enumerate(&CacheFile::FailUpdateListeners, this);
}
void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) { // When aFireAndForget is set to true, we are called from dtor. // |this| must not be referenced after this method returns! LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this)); nsresult rv; AssertOwnsLock(); MOZ_ASSERT(!mMemoryOnly); if (!mMetadata) { MOZ_CRASH("Must have metadata here"); return; } if (!aFireAndForget) { // if aFireAndForget is set, we are called from dtor. Write // scheduler hard-refers CacheFile otherwise, so we cannot be here. CacheFileIOManager::UnscheduleMetadataWrite(this); } if (NS_FAILED(mStatus)) return; if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() || mWritingMetadata || mOpeningFile) return; LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]", this)); rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this); if (NS_SUCCEEDED(rv)) { mWritingMetadata = true; mDataIsDirty = false; } else { LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously failed " "[this=%p]", this)); // TODO: close streams with error if (NS_SUCCEEDED(mStatus)) mStatus = rv; } }
void CacheFile::WriteMetadataIfNeeded() { LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this)); nsresult rv; AssertOwnsLock(); MOZ_ASSERT(!mMemoryOnly); if (mTimer) { mTimer->Cancel(); mTimer = nullptr; } if (NS_FAILED(mStatus)) return; if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() || mWritingMetadata || mOpeningFile) return; LOG(("CacheFile::WriteMetadataIfNeeded() - Writing metadata [this=%p]", this)); nsRefPtr<MetadataListenerHelper> mlh = new MetadataListenerHelper(this); rv = mMetadata->WriteMetadata(mDataSize, mlh); if (NS_SUCCEEDED(rv)) { mWritingMetadata = true; mDataIsDirty = false; } else { LOG(("CacheFile::WriteMetadataIfNeeded() - Writing synchronously failed " "[this=%p]", this)); // TODO: close streams with error if (NS_SUCCEEDED(mStatus)) mStatus = rv; } }
// Queues aCallback to be notified (on the calling thread) once the chunk at
// aIndex becomes available. Lock must be held.
nsresult
CacheFile::QueueChunkListener(uint32_t aIndex,
                              CacheFileChunkListener *aCallback)
{
  LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%d, listener=%p]",
       this, aIndex, aCallback));

  AssertOwnsLock();

  MOZ_ASSERT(aCallback);

  // Remember the callback together with the thread it must be notified on.
  ChunkListenerItem *waiter = new ChunkListenerItem();
  waiter->mCallback = aCallback;
  waiter->mTarget = NS_GetCurrentThread();

  // Lazily create the per-chunk listener list on first use.
  ChunkListeners *entry;
  if (!mChunkListeners.Get(aIndex, &entry)) {
    entry = new ChunkListeners();
    mChunkListeners.Put(aIndex, entry);
  }

  entry->mItems.AppendElement(waiter);

  return NS_OK;
}
// Looks up or creates the chunk at aIndex. Must be called with the lock held.
//
// @param aWriter   True when the caller will write to the chunk; a writer
//                  gets the chunk immediately (even if not ready) and may
//                  trigger creation/zero-padding of missing chunks.
// @param aCallback Listener notified once the chunk is available; must be
//                  null for writers and non-null for readers (asserted).
// @param _retval   Receives an AddRef'ed chunk when one is returned directly
//                  (otherwise the caller is notified via aCallback later).
//
// BUGFIX: all offset computations of the form index * kChunkSize were done
// in 32-bit unsigned arithmetic and wrapped for offsets >= 4 GiB even though
// the results are stored in int64_t. The multiplications are now widened to
// 64 bits before multiplying.
nsresult
CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
                          CacheFileChunkListener *aCallback,
                          CacheFileChunk **_retval)
{
  AssertOwnsLock();

  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%d, writer=%d, listener=%p]",
       this, aIndex, aWriter, aCallback));

  MOZ_ASSERT(mReady);
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
  MOZ_ASSERT((aWriter && !aCallback) || (!aWriter && aCallback));

  nsresult rv;

  nsRefPtr<CacheFileChunk> chunk;
  if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
         chunk.get(), this));

    // A writer takes the chunk right away; a reader must wait until it is
    // ready.
    if (chunk->IsReady() || aWriter) {
      chunk.swap(*_retval);
    } else {
      rv = QueueChunkListener(aIndex, aCallback);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    return NS_OK;
  }

  if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
         chunk.get(), this));

    // Move the chunk back to the active set and re-attach it to this file.
    mChunks.Put(aIndex, chunk);
    mCachedChunks.Remove(aIndex);
    chunk->mFile = this;
    chunk->mRemovingChunk = false;

    MOZ_ASSERT(chunk->IsReady());

    chunk.swap(*_retval);
    return NS_OK;
  }

  // Widen before multiplying: in plain uint32_t arithmetic this product
  // would wrap for offsets >= 4 GiB.
  int64_t off = aIndex * static_cast<int64_t>(kChunkSize);

  if (off < mDataSize) {
    // We cannot be here if this is memory only entry since the chunk must exist
    MOZ_ASSERT(!mMemoryOnly);

    chunk = new CacheFileChunk(this, aIndex);
    mChunks.Put(aIndex, chunk);

    LOG(("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
         "the disk [this=%p]", chunk.get(), this));

    // Read the chunk from the disk
    rv = chunk->Read(mHandle, std::min(static_cast<uint32_t>(mDataSize - off),
                     static_cast<uint32_t>(kChunkSize)),
                     mMetadata->GetHash(aIndex), this);
    if (NS_FAILED(rv)) {
      // Undo the registration; the chunk's file reference is released
      // outside the lock.
      chunk->mRemovingChunk = true;
      ReleaseOutsideLock(static_cast<CacheFileChunkListener *>(
                           chunk->mFile.forget().get()));
      mChunks.Remove(aIndex);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    if (aWriter) {
      chunk.swap(*_retval);
    } else {
      rv = QueueChunkListener(aIndex, aCallback);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    return NS_OK;
  } else if (off == mDataSize) {
    if (aWriter) {
      // this listener is going to write to the chunk
      chunk = new CacheFileChunk(this, aIndex);
      mChunks.Put(aIndex, chunk);

      LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
           chunk.get(), this));

      chunk->InitNew(this);
      mMetadata->SetHash(aIndex, chunk->Hash());

      if (HaveChunkListeners(aIndex)) {
        rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
        NS_ENSURE_SUCCESS(rv, rv);
      }

      chunk.swap(*_retval);
      return NS_OK;
    }
  } else {
    if (aWriter) {
      // this chunk was requested by writer, but we need to fill the gap first

      // Fill with zero the last chunk if it is incomplete
      if (mDataSize % kChunkSize) {
        rv = PadChunkWithZeroes(mDataSize / kChunkSize);
        NS_ENSURE_SUCCESS(rv, rv);

        MOZ_ASSERT(!(mDataSize % kChunkSize));
      }

      uint32_t startChunk = mDataSize / kChunkSize;

      if (mMemoryOnly) {
        // We need to create all missing CacheFileChunks if this is memory-only
        // entry
        for (uint32_t i = startChunk ; i < aIndex ; i++) {
          rv = PadChunkWithZeroes(i);
          NS_ENSURE_SUCCESS(rv, rv);
        }
      } else {
        // We don't need to create CacheFileChunk for other empty chunks unless
        // there is some input stream waiting for this chunk.

        if (startChunk != aIndex) {
          // Make sure the file contains zeroes at the end of the file
          // 64-bit arithmetic here as well (see `off` above).
          rv = CacheFileIOManager::TruncateSeekSetEOF(
                 mHandle,
                 startChunk * static_cast<int64_t>(kChunkSize),
                 aIndex * static_cast<int64_t>(kChunkSize),
                 nullptr);
          NS_ENSURE_SUCCESS(rv, rv);
        }

        for (uint32_t i = startChunk ; i < aIndex ; i++) {
          if (HaveChunkListeners(i)) {
            rv = PadChunkWithZeroes(i);
            NS_ENSURE_SUCCESS(rv, rv);
          } else {
            mMetadata->SetHash(i, kEmptyChunkHash);
            // 64-bit arithmetic: (i + 1) * kChunkSize would wrap in uint32_t
            // for data sizes >= 4 GiB.
            mDataSize = (i + 1) * static_cast<int64_t>(kChunkSize);
          }
        }
      }

      MOZ_ASSERT(mDataSize == off);
      rv = GetChunkLocked(aIndex, true, nullptr, getter_AddRefs(chunk));
      NS_ENSURE_SUCCESS(rv, rv);

      chunk.swap(*_retval);
      return NS_OK;
    }
  }

  if (mOutput) {
    // the chunk doesn't exist but mOutput may create it
    rv = QueueChunkListener(aIndex, aCallback);
    NS_ENSURE_SUCCESS(rv, rv);
  } else {
    return NS_ERROR_NOT_AVAILABLE;
  }

  return NS_OK;
}