BOOL CADORecordset::AppendChunk(LPCTSTR lpFieldName, LPVOID lpData, UINT nBytes)
{
    FieldPtr pField = m_pRecordset->Fields->GetItem(lpFieldName);

    return AppendChunk(pField, lpData, nBytes);
}
nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have one chunk, then we can compact if it has excess capacity.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  Maybe<Chunk> newChunk = CreateChunk(length, /* aRoundUp = */ false);
  if (MOZ_UNLIKELY(!newChunk || newChunk->AllocationFailed())) {
    NS_WARNING("Failed to allocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the new chunk.
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t offset = newChunk->Length();
    MOZ_ASSERT(offset < newChunk->Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= newChunk->Capacity());

    memcpy(newChunk->Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    newChunk->AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(newChunk->Length() == newChunk->Capacity(),
             "Compacted chunk has slack space");

  // Replace the old chunks with the new, compact chunk.
  mChunks.Clear();
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(newChunk))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }
  mChunks.Compact();

  return NS_OK;
}
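// Illustrative sketch (hypothetical standalone code, not the SourceBuffer
// API): the chunk-compaction idea above, expressed with std::vector so it can
// be compiled on its own. CompactChunks and Chunk here are assumptions for
// illustration; the real code additionally handles allocation failure and
// capacity rounding.
#include <cstddef>
#include <utility>
#include <vector>

using Chunk = std::vector<char>;

// Merge many partially-filled chunks into one exactly-sized chunk.
std::vector<Chunk> CompactChunks(const std::vector<Chunk>& chunks)
{
  size_t total = 0;
  for (const Chunk& c : chunks) {
    total += c.size();
  }

  Chunk merged;
  merged.reserve(total);  // one allocation, no slack space afterwards
  for (const Chunk& c : chunks) {
    merged.insert(merged.end(), c.begin(), c.end());
  }

  return { std::move(merged) };
}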
BOOL CADORecordset::AppendChunk(int nIndex, LPVOID lpData, UINT nBytes)
{
    _variant_t vtIndex;

    vtIndex.vt = VT_I2;
    vtIndex.iVal = nIndex;

    FieldPtr pField = m_pRecordset->Fields->GetItem(vtIndex);

    return AppendChunk(pField, lpData, nBytes);
}
BOOL CADORecordset::AppendChunk(LPCTSTR lpFieldName, LPVOID lpData, UINT nBytes)
{
    FieldPtr pField;

    try
    {
        pField = m_pRecordset->Fields->GetItem(lpFieldName);
    }
    catch (_com_error& e)
    {
        dump_com_error(e);
        return FALSE;
    }

    return AppendChunk(pField, lpData, nBytes);
}
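// Illustrative sketch of the pattern the AppendChunk overloads above follow:
// thin overloads resolve the field (by name or by index), convert any lookup
// exception into a FALSE-style return, and delegate to a single worker.
// Everything below (Recordset, Field, mFields) is hypothetical standalone
// code for illustration, not the ADO wrapper classes themselves.
#include <cstddef>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

struct Field { std::vector<unsigned char> blob; };

class Recordset {
public:
  bool AppendChunk(const std::string& name, const void* data, size_t bytes) {
    Field* field = nullptr;
    try {
      field = &mFields.at(name);        // may throw std::out_of_range
    } catch (const std::out_of_range&) {
      return false;                     // mirrors the catch(_com_error&) path
    }
    return AppendChunk(*field, data, bytes);
  }

private:
  // The single worker that every overload funnels into.
  bool AppendChunk(Field& field, const void* data, size_t bytes) {
    const unsigned char* p = static_cast<const unsigned char*>(data);
    field.blob.insert(field.blob.end(), p, p + bytes);
    return true;
  }

  std::map<std::string, Field> mFields;
};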
nsresult
SourceBuffer::ExpectLength(size_t aExpectedLength)
{
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aExpectedLength))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}
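// Illustrative sketch (hypothetical standalone code, not the imagelib API):
// the idea behind ExpectLength() is to pre-size the backing storage once the
// total payload size is known (for example from a Content-Length header), so
// that later Append() calls never have to grow or copy the buffer.
#include <cstddef>
#include <vector>

class GrowableBuffer {
public:
  // Hint the final size up front; harmless if the hint turns out to be wrong.
  void ExpectLength(size_t expectedLength) { mData.reserve(expectedLength); }

  void Append(const char* data, size_t length) {
    mData.insert(mData.end(), data, data + length);
  }

private:
  std::vector<char> mData;
};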
nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = std::min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");

    nextChunk = CreateChunk(nextChunkCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
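// Illustrative sketch (hypothetical helper, not part of SourceBuffer): the
// partitioning arithmetic Append() relies on to split an incoming write
// between the space left in the current chunk and a freshly allocated next
// chunk. PartitionWrite and WriteSplit are names assumed for this example.
#include <algorithm>
#include <cstddef>

struct WriteSplit {
  size_t forCurrentChunk;  // bytes that fit in the current chunk
  size_t forNextChunk;     // bytes that spill into a new chunk
};

WriteSplit PartitionWrite(size_t length, size_t capacity, size_t used)
{
  const size_t remaining = capacity - used;
  const size_t forCurrent = std::min(length, remaining);
  return { forCurrent, length - forCurrent };
}

// Example: a 10-byte write into a 16-byte chunk that already holds 12 bytes
// puts 4 bytes in the current chunk and 6 bytes in the next one:
//   WriteSplit s = PartitionWrite(10, 16, 12);   // s = { 4, 6 }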