NS_IMETHODIMP nsCacheEntryDescriptor::
nsCompressOutputStreamWrapper::Write(const char * buf,
                                     uint32_t count,
                                     uint32_t * result)
{
    // Deflate |count| bytes from |buf| into mWriteBuffer, flushing the
    // compressed output to the underlying stream wrapper whenever the
    // buffer fills. On success, *result == count (all input consumed)
    // and mUncompressedCount tracks the total uncompressed bytes seen.
    //
    // NOTE(review): unlike nsDecompressInputStreamWrapper::Read, this
    // method does not acquire mLock -- confirm that callers serialize
    // access to this wrapper (or that WriteBuffer() locks internally).
    int zerr = Z_OK;
    nsresult rv = NS_OK;

    if (!mStreamInitialized) {
        rv = InitZstream();
        if (NS_FAILED(rv)) {
            return rv;
        }
    }

    if (!mWriteBuffer) {
        // Once allocated, this buffer is referenced by the zlib stream and
        // cannot be grown. We use 2x(initial write request) to approximate
        // a stream buffer size proportional to request buffers.
        // (std::max for consistency with nsDecompressInputStreamWrapper::Read,
        // which uses it for the identical sizing pattern.)
        mWriteBufferLen = std::max(count*2, (uint32_t)kMinCompressWriteBufLen);
        mWriteBuffer = (unsigned char*)nsMemory::Alloc(mWriteBufferLen);
        if (!mWriteBuffer) {
            mWriteBufferLen = 0;
            return NS_ERROR_OUT_OF_MEMORY;
        }
        mZstream.next_out = mWriteBuffer;
        mZstream.avail_out = mWriteBufferLen;
    }

    // Compress (deflate) the requested buffer. Keep going
    // until the entire buffer has been deflated.
    mZstream.avail_in = count;
    mZstream.next_in = (Bytef*)buf;
    while (mZstream.avail_in > 0) {
        zerr = deflate(&mZstream, Z_NO_FLUSH);
        if (zerr == Z_STREAM_ERROR) {
            // Fatal: stream state is corrupt; tear it down so a later
            // call can re-initialize from scratch.
            deflateEnd(&mZstream);
            mStreamInitialized = false;
            return NS_ERROR_FAILURE;
        }
        // Note: Z_BUF_ERROR is non-fatal and sometimes expected here.

        // If the compression stream output buffer is filled, write
        // it out to the underlying stream wrapper.
        if (mZstream.avail_out == 0) {
            rv = WriteBuffer();
            if (NS_FAILED(rv)) {
                deflateEnd(&mZstream);
                mStreamInitialized = false;
                return rv;
            }
        }
    }
    *result = count;
    mUncompressedCount += *result;
    return NS_OK;
}
NS_INTERFACE_MAP_END_THREADSAFE

// Inflate up to |count| bytes of decompressed data into |buf|, pulling
// compressed input from the wrapped stream through mReadBuffer as needed.
// On success, *countRead is the number of decompressed bytes produced
// (which may be less than |count| at end of input).
NS_IMETHODIMP nsCacheEntryDescriptor::
nsDecompressInputStreamWrapper::Read(char * buf,
                                     uint32_t count,
                                     uint32_t *countRead)
{
    mozilla::MutexAutoLock lock(mLock);
    int zerr = Z_OK;
    nsresult rv = NS_OK;

    // Lazily initialize the inflate stream on first read.
    if (!mStreamInitialized) {
        rv = InitZstream();
        if (NS_FAILED(rv)) {
            return rv;
        }
    }

    // The caller's buffer is the inflate destination.
    mZstream.next_out = (Bytef*)buf;
    mZstream.avail_out = count;

    if (mReadBufferLen < count) {
        // Allocate a buffer for reading from the input stream. This will
        // determine the max number of compressed bytes read from the
        // input stream at one time. Making the buffer size proportional
        // to the request size is not necessary, but helps minimize the
        // number of read requests to the input stream.
        uint32_t newBufLen = std::max(count, (uint32_t)kMinDecompressReadBufLen);
        unsigned char* newBuf;
        newBuf = (unsigned char*)nsMemory::Realloc(mReadBuffer,
            newBufLen);
        if (newBuf) {
            mReadBuffer = newBuf;
            mReadBufferLen = newBufLen;
        }
        // If Realloc failed but an older (smaller) buffer already exists,
        // keep using it; only fail when no buffer at all is available.
        if (!mReadBuffer) {
            mReadBufferLen = 0;
            return NS_ERROR_OUT_OF_MEMORY;
        }
    }

    // read and inflate data until the output buffer is full, or
    // there is no more data to read
    while (NS_SUCCEEDED(rv) &&
           zerr == Z_OK &&
           mZstream.avail_out > 0 &&
           count > 0) {
        if (mZstream.avail_in == 0) {
            // Refill the compressed-input buffer from the wrapped stream
            // (lock is already held, hence the _Locked variant).
            rv = nsInputStreamWrapper::Read_Locked((char*)mReadBuffer,
                                                   mReadBufferLen,
                                                   &mZstream.avail_in);
            if (NS_FAILED(rv) || !mZstream.avail_in) {
                // Read error or genuine end of input: stop here.
                break;
            }
            mZstream.next_in = mReadBuffer;
        }
        zerr = inflate(&mZstream, Z_NO_FLUSH);
        if (zerr == Z_STREAM_END) {
            // The compressed data may have been stored in multiple
            // chunks/streams. To allow for this case, re-initialize
            // the inflate stream and continue decompressing from
            // the next byte.
            // Save/restore the stream cursors defensively around the
            // reset so decompression resumes exactly where it left off.
            Bytef * saveNextIn = mZstream.next_in;
            unsigned int saveAvailIn = mZstream.avail_in;
            Bytef * saveNextOut = mZstream.next_out;
            unsigned int saveAvailOut = mZstream.avail_out;
            inflateReset(&mZstream);
            mZstream.next_in = saveNextIn;
            mZstream.avail_in = saveAvailIn;
            mZstream.next_out = saveNextOut;
            mZstream.avail_out = saveAvailOut;
            zerr = Z_OK;
        } else if (zerr != Z_OK) {
            // Any other inflate result means the stored data is corrupt
            // or not in the expected encoding.
            rv = NS_ERROR_INVALID_CONTENT_ENCODING;
        }
    }
    if (NS_SUCCEEDED(rv)) {
        // Bytes produced = requested output space minus what inflate
        // left unused.
        *countRead = count - mZstream.avail_out;
    }
    return rv;
}