// Streams incoming response data into the fixed-size chunk buffer,
// flushing the buffer to disk each time it fills.
// (Older PRUint32-offset variant of the nsIStreamListener API.)
//
// @param input  source stream positioned at the current data
// @param count  number of bytes available to read
// @return NS_OK on success; a stream/flush error code otherwise
NS_IMETHODIMP
nsIncrementalDownload::OnDataAvailable(nsIRequest *request,
                                       nsISupports *context,
                                       nsIInputStream *input,
                                       PRUint32 offset,
                                       PRUint32 count)
{
  while (count) {
    // Read at most the space remaining in the current chunk.
    PRUint32 space = mChunkSize - mChunkLen;
    PRUint32 n, len = PR_MIN(space, count);

    nsresult rv = input->Read(mChunk + mChunkLen, len, &n);
    if (NS_FAILED(rv))
      return rv;
    if (n != len)
      return NS_ERROR_UNEXPECTED;  // short read is unexpected here

    count -= n;
    mChunkLen += n;

    if (mChunkLen == mChunkSize) {
      // BUG FIX: the result of FlushChunk() was previously discarded,
      // so a failed disk write went unnoticed and the download kept
      // consuming data.  (The newer overload of this method already
      // propagates the error; this brings the two in line.)
      rv = FlushChunk();
      if (NS_FAILED(rv))
        return rv;
    }
  }
  return NS_OK;
}
// Copies the incoming stream into mChunk, persisting the chunk each time
// it becomes full, then throttles listener progress notifications to at
// most one per UPDATE_PROGRESS_INTERVAL.
NS_IMETHODIMP
nsIncrementalDownload::OnDataAvailable(nsIRequest *request,
                                       nsISupports *context,
                                       nsIInputStream *input,
                                       uint64_t offset,
                                       uint32_t count)
{
  for (uint32_t remaining = count; remaining != 0; ) {
    // Never read past the end of the chunk buffer.
    const uint32_t room = mChunkSize - mChunkLen;
    const uint32_t want = std::min(room, remaining);

    uint32_t got;
    nsresult rv = input->Read(&mChunk[mChunkLen], want, &got);
    if (NS_FAILED(rv))
      return rv;
    if (got != want)
      return NS_ERROR_UNEXPECTED;

    mChunkLen += got;
    remaining -= got;

    // Chunk is full: write it out before reading any more data.
    if (mChunkLen == mChunkSize) {
      rv = FlushChunk();
      if (NS_FAILED(rv))
        return rv;
    }
  }

  // Rate-limit progress callbacks.
  if (PR_Now() > mLastProgressUpdate + UPDATE_PROGRESS_INTERVAL)
    UpdateProgress();

  return NS_OK;
}
// Called when the channel for the current range request stops.
// Decides whether the download has failed, finished, or should continue
// with the next chunk after mInterval.  The statement order is load-
// bearing: sentinel statuses are laundered first, then the first real
// failure is latched into mStatus, then the partial chunk is flushed,
// and only then do we either notify the listener or re-arm the timer.
NS_IMETHODIMP
nsIncrementalDownload::OnStopRequest(nsIRequest *request,
                                     nsISupports *context,
                                     nsresult status)
{
  // Not a real error; just a trick to kill off the channel without our
  // listener having to care.
  if (status == NS_ERROR_DOWNLOAD_NOT_PARTIAL)
    return NS_OK;

  // Not a real error; just a trick used to suppress OnDataAvailable calls.
  if (status == NS_ERROR_DOWNLOAD_COMPLETE)
    status = NS_OK;

  // Latch the first failure: only overwrite mStatus while it is still OK.
  if (NS_SUCCEEDED(mStatus))
    mStatus = status;

  if (mChunk) {
    // Flush any partially filled chunk; a flush failure becomes our status.
    if (NS_SUCCEEDED(mStatus))
      mStatus = FlushChunk();

    mChunk = nsnull;  // deletes memory
    mChunkLen = 0;
  }

  mChannel = nsnull;

  // Notify listener if we hit an error or finished
  if (NS_FAILED(mStatus) || mCurrentSize == mTotalSize) {
    CallOnStopRequest();
    return NS_OK;
  }

  return StartTimer(mInterval);  // Do next chunk
}
void TIndexer::Index(const vector<string>& files, const char* idxFile, const char* datFile) { ofstream idxOutput(idxFile); ofstream datOutput(datFile); vector<char> idxBuffer(1 << 13); vector<char> datBuffer(1 << 13); idxOutput.rdbuf()->pubsetbuf(&idxBuffer[0], idxBuffer.size()); datOutput.rdbuf()->pubsetbuf(&datBuffer[0], datBuffer.size()); TDocId filesCount = files.size(); Write(idxOutput, static_cast<uint32_t>(Config.CompressionMethod)); Write(idxOutput, filesCount); for (TDocId i = 0; i < filesCount; ++i) { Write(idxOutput, Offset); Write(datOutput, static_cast<TOffset>(files[i].size())); datOutput << files[i]; Offset += sizeof(TOffset) + files[i].size(); } Chunk.Lists.resize(TRI_COUNT); LastDocs.resize(TRI_COUNT); for (TDocId i = 0; i < filesCount; ++i) Index(i, files[i].c_str(), idxOutput, datOutput); if (Chunk.Size) FlushChunk(idxOutput, datOutput); }
// Appends a pre-formatted HTML fragment to the in-memory log chunk,
// inserting a separator based on how long it has been since the last
// entry: a new timestamped table row after >5 min, a new paragraph
// after >1 min, a <br> after >10 s, nothing otherwise.
//
// NOTE(review): mLogLockEvent is used as a mutex, but the return value
// of WaitForSingleObject is ignored — after a 250 ms timeout we proceed
// WITHOUT holding the lock.  Confirm this best-effort locking is
// intentional.
void DataAccumulator::Log2HTML(PWSTR htmlString)
{
    const int TIMEOUT = 250; // 1/4 sec
    static SYSTEMTIME lastLogSectionTime;

    // This method is synchronized! (see NOTE above about the timeout)
    WaitForSingleObject(mLogLockEvent, TIMEOUT);
    {
        int elapsedSeconds = GetElapsedSecondsFromLastLog();

        // Flush the buffer to disk on the first log call or when the
        // chunk is ~90% full.  (The original comment also mentioned
        // "log started yesterday", but no date check is visible here —
        // TODO confirm the condition matches the intent.)
        if (mIsFirstLog || (mCurrentChunkLength > 0.9 * BUFFER_SIZE))
        {
            FlushChunk();
            mCurrentChunkLength = 0;
        }

        if (elapsedSeconds > 60*5) // 5 min.
        {
            // Long gap: close the previous row and open a new
            // timestamped section.
            if (mPreviousRowExists)
            {
                Append2Log(SECTION_END_TAGS);
            }
            Append2Log(L"\t\t\t\t<tr>\n\t\t\t\t\t<td>");

            SYSTEMTIME time;
            GetSystemTime(&time); // NOTE(review): UTC, not local time — confirm desired
            lastLogSectionTime = time; // Remember time of section opening

            WCHAR timeS[20];// strlen(12.12.2012 12:12:12\0) == 20
            wsprintfW(timeS, L"%hu.%hu.%hu %hu:%hu:%hu",
                      time.wDay, time.wMonth, time.wYear,
                      time.wHour, time.wMinute, time.wSecond);
            Append2Log(timeS);
            Append2Log(L"</td>\n\t\t\t\t\t<td>\n\t\t\t\t\t\t<p>");
            mPreviousRowExists = true;
            mPreviousParagraphExists = true;
        }
        else if (elapsedSeconds > 60) // 1 min
        {
            // Medium gap: start a new paragraph in the current cell.
            if (mPreviousParagraphExists)
            {
                Append2Log(L"</p>\n");
            }
            Append2Log(L"\t\t\t\t\t\t<p>");
            mPreviousParagraphExists = true;
        }
        else if (elapsedSeconds > 10)
        {
            // Short gap: just a line break.
            Append2Log(L"<br>");
        }

        Append2Log(htmlString);
    }
    SetEvent(mLogLockEvent);
}
// Indexes one document: slides a 3-byte window over the file contents
// and records each distinct trigram once per document into the current
// chunk, flushing the chunk when it reaches the configured size.
//
// @param docId    id under which trigram postings are recorded
// @param filename path of the document to read
void TIndexer::Index(TDocId docId, const char* filename, ostream& idxOutput, ostream& datOutput) {
    if (Config.Verbose)
        cerr << "Indexing: " << filename << '\n';

    ifstream input(filename);
    vector<char> buffer(1 << 13);
    input.rdbuf()->pubsetbuf(&buffer[0], buffer.size());

    // Sliding window holding the last three bytes read.
    // BUG FIX: the original declared `char chars[4];` and then executed
    // `chars[4] = 0;` — a store one past the end of the array, which is
    // undefined behavior (stack corruption).  Only chars[0..2] are ever
    // used and the fourth byte was never read, so the array shrinks to
    // three elements and the stray write is removed.
    char chars[3];

    // Need at least two bytes to seed the window; shorter files have no
    // trigrams at all.
    if (!input.get(chars[0]) || !input.get(chars[1]))
        return;

    vector<bool> used(TRI_COUNT); // trigram already recorded for this doc?
    while (input.get(chars[2])) {
        // Pack the 3-byte window into a little-endian 24-bit trigram id.
        TTrigram tri = TByte(chars[0])
                     | (TByte(chars[1]) << 8)
                     | (TByte(chars[2]) << 16);
        if (!used[tri]) {
            Chunk.Add(tri, docId);
            used[tri] = true;
        }
        // Slide the window forward by one byte.
        chars[0] = chars[1];
        chars[1] = chars[2];
    }

    if (Chunk.Size >= Config.ChunkSize)
        FlushChunk(idxOutput, datOutput);
}
// Flushes any buffered log data to disk, then releases the heap buffers
// this accumulator owns.  FlushChunk() must run before `chunk` is freed,
// since it writes from that buffer.
DataAccumulator::~DataAccumulator(void)
{
    FlushChunk(); // write remaining log info to file
    delete[] chunk;           // in-memory log chunk buffer
    delete[] mCurrentLogFile; // owned string buffer — presumably the log file name; confirm at allocation site
}