void LLPumpIO::callback() { LLMemType m1(LLMemType::MTYPE_IO_PUMP); //llinfos << "LLPumpIO::callback()" << llendl; if(true) { #if LL_THREADS_APR LLScopedLock lock(mCallbackMutex); #endif std::copy( mPendingCallbacks.begin(), mPendingCallbacks.end(), std::back_insert_iterator<callbacks_t>(mCallbacks)); mPendingCallbacks.clear(); } if(!mCallbacks.empty()) { callbacks_t::iterator it = mCallbacks.begin(); callbacks_t::iterator end = mCallbacks.end(); for(; it != end; ++it) { // it's always the first and last time for respone chains (*it).mHead = (*it).mChainLinks.begin(); (*it).mInit = true; (*it).mEOS = true; processChain(*it); } mCallbacks.clear(); } }
/**
 * \fn static void* threadStart(void* handle)
 * \brief start the filter thread
 * \param handle the stream to use the filters on
 * \return always NULL
 *
 * Worker loop for a BufferedWriter: while the writer is active, waits
 * on the writer's condition variable, then walks the chain list from
 * the last visited chain up to the current writer chain, draining any
 * chain that has unread data.
 */
static void* threadStart(void* handle)
{
	BufferedWriter* self = (BufferedWriter*)handle;
	// `chain` persists across wakeups, so each pass resumes from where
	// the previous pass stopped rather than from firstChain.
	BufferChain* chain = self->firstChain;
	while (self->active) {
		oml_lock(&self->lock, "bufferedWriter");
		// NOTE(review): pthread_cond_wait() is not wrapped in a
		// predicate loop; a spurious wakeup just triggers an extra
		// drain pass below -- confirm that is intended.
		pthread_cond_wait(&self->semaphore, &self->lock);
		// Process all chains which have data in them
		while(1) {
			// Unread data exists when the message pointer is ahead of
			// the read pointer for this chain's buffer.
			if (mbuf_message(chain->mbuf) > mbuf_rdptr(chain->mbuf)) {
				// got something to read from this chain
				// Loop until processChain() returns nonzero
				// (TODO confirm the return-value convention).
				while (!processChain(self, chain));
			}
			// stop if we caught up to the writer
			if (chain == self->writerChain) break;
			chain = chain->next;
		}
		oml_unlock(&self->lock, "bufferedWriter");
	}
	return NULL;
}
void LLPumpIO::callback() { //LL_INFOS() << "LLPumpIO::callback()" << LL_ENDL; if(true) { #if LL_THREADS_APR LLScopedLock lock(mCallbackMutex); #endif std::copy( mPendingCallbacks.begin(), mPendingCallbacks.end(), std::back_insert_iterator<callbacks_t>(mCallbacks)); mPendingCallbacks.clear(); } if(!mCallbacks.empty()) { callbacks_t::iterator it = mCallbacks.begin(); callbacks_t::iterator end = mCallbacks.end(); for(; it != end; ++it) { LL_RECORD_BLOCK_TIME(FTM_PUMP_CALLBACK_CHAIN); // it's always the first and last time for respone chains (*it).mHead = (*it).mChainLinks.begin(); (*it).mInit = true; (*it).mEOS = true; processChain(*it); } mCallbacks.clear(); } }
// Pump the I/O system once.
//
// poll_timeout is in microseconds.
//
// One pass: run pending runners, migrate pending chains into the
// running set (under mChainsMutex when threading is enabled), poll the
// APR pollset, then give each unlocked running chain a chance to run.
// Expired chains are errored or retired, signalled or unconditional
// chains are run via processChain(), and chains whose head reached the
// end of their links are cleaned up and erased.
void LLPumpIO::pump(const S32& poll_timeout)
{
	LLMemType m1(LLMemType::MTYPE_IO_PUMP);
	LLFastTimer t1(LLFastTimer::FTM_PUMP);
	//llinfos << "LLPumpIO::pump()" << llendl;

	// Run any pending runners.
	mRunner.run();

	// We need to move all of the pending heads over to the running
	// chains.
	PUMP_DEBUG;
	// if(true) exists only to open a scope for the lock below.
	if(true)
	{
#if LL_THREADS_APR
		LLScopedLock lock(mChainsMutex);
#endif
		// bail if this pump is paused.
		if(PAUSING == mState)
		{
			mState = PAUSED;
		}
		if(PAUSED == mState)
		{
			return;
		}

		PUMP_DEBUG;
		// Move the pending chains over to the running chains
		if(!mPendingChains.empty())
		{
			PUMP_DEBUG;
			//lldebugs << "Pushing " << mPendingChains.size() << "." << llendl;
			std::copy(
				mPendingChains.begin(),
				mPendingChains.end(),
				std::back_insert_iterator<running_chains_t>(mRunningChains));
			mPendingChains.clear();
			PUMP_DEBUG;
		}

		// Clear any locks. This needs to be done here so that we do
		// not clash during a call to clearLock().
		if(!mClearLocks.empty())
		{
			PUMP_DEBUG;
			running_chains_t::iterator it = mRunningChains.begin();
			running_chains_t::iterator end = mRunningChains.end();
			std::set<S32>::iterator not_cleared = mClearLocks.end();
			for(; it != end; ++it)
			{
				// A chain whose lock id was queued for clearing is
				// unlocked by zeroing mLock.
				if((*it).mLock && mClearLocks.find((*it).mLock) != not_cleared)
				{
					(*it).mLock = 0;
				}
			}
			PUMP_DEBUG;
			mClearLocks.clear();
		}
	}

	PUMP_DEBUG;
	// rebuild the pollset if necessary
	if(mRebuildPollset)
	{
		PUMP_DEBUG;
		rebuildPollset();
		mRebuildPollset = false;
	}

	// Poll based on the last known pollset
	// *TODO: may want to pass in a poll timeout so it works correctly
	// in single and multi threaded processes.
	PUMP_DEBUG;
	// Map from client id to index into poll_fd so the signalled
	// descriptor's rtnevents can be inspected per chain below.
	typedef std::map<S32, S32> signal_client_t;
	signal_client_t signalled_client;
	const apr_pollfd_t* poll_fd = NULL;
	if(mPollset)
	{
		PUMP_DEBUG;
		//llinfos << "polling" << llendl;
		S32 count = 0;
		S32 client_id = 0;
		{
			LLPerfBlock polltime("pump_poll");
			// NOTE(review): the apr_status_t result is discarded;
			// presumably count is 0 on timeout/failure -- confirm
			// against the APR documentation.
			apr_pollset_poll(mPollset, poll_timeout, &count, &poll_fd);
		}
		PUMP_DEBUG;
		for(S32 ii = 0; ii < count; ++ii)
		{
			ll_debug_poll_fd("Signalled pipe", &poll_fd[ii]);
			client_id = *((S32*)poll_fd[ii].client_data);
			signalled_client[client_id] = ii;
		}
		PUMP_DEBUG;
	}

	PUMP_DEBUG;
	// set up for a check to see if each one was signalled
	signal_client_t::iterator not_signalled = signalled_client.end();

	// Process everything as appropriate
	//lldebugs << "Running chain count: " << mRunningChains.size() << llendl;
	running_chains_t::iterator run_chain = mRunningChains.begin();
	bool process_this_chain = false;
	// No increment in the for statement: erase() supplies the next
	// iterator when a chain is retired.
	for(; run_chain != mRunningChains.end(); )
	{
		PUMP_DEBUG;
		// First, recover or retire chains whose expiration timer fired.
		if((*run_chain).mInit
		   && (*run_chain).mTimer.getStarted()
		   && (*run_chain).mTimer.hasExpired())
		{
			PUMP_DEBUG;
			if(handleChainError(*run_chain, LLIOPipe::STATUS_EXPIRED))
			{
				// the pipe probably handled the error. If the handler
				// forgot to reset the expiration then we need to do
				// that here.
				if((*run_chain).mTimer.getStarted()
				   && (*run_chain).mTimer.hasExpired())
				{
					PUMP_DEBUG;
					llinfos << "Error handler forgot to reset timeout. "
							<< "Resetting to " << DEFAULT_CHAIN_EXPIRY_SECS
							<< " seconds." << llendl;
					(*run_chain).setTimeoutSeconds(DEFAULT_CHAIN_EXPIRY_SECS);
				}
			}
			else
			{
				PUMP_DEBUG;
				// it timed out and no one handled it, so we need to
				// retire the chain
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
				lldebugs << "Removing chain "
						 << (*run_chain).mChainLinks[0].mPipe
						 << " '"
						 << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
						 << "' because it timed out." << llendl;
#else
//				lldebugs << "Removing chain "
//						 << (*run_chain).mChainLinks[0].mPipe
//						 << " because we reached the end."
//						 << llendl;
#endif
				run_chain = mRunningChains.erase(run_chain);
				continue;
			}
		}
		PUMP_DEBUG;
		// Locked chains are skipped entirely this cycle.
		if((*run_chain).mLock)
		{
			++run_chain;
			continue;
		}
		PUMP_DEBUG;
		mCurrentChain = run_chain;
		if((*run_chain).mDescriptors.empty())
		{
			// if there are no conditionals, just process this chain.
			process_this_chain = true;
			//lldebugs << "no conditionals - processing" << llendl;
		}
		else
		{
			PUMP_DEBUG;
			//lldebugs << "checking conditionals" << llendl;
			// Check if this run chain was signalled. If any file
			// descriptor is ready for something, then go ahead and
			// process this chain.
			process_this_chain = false;
			if(!signalled_client.empty())
			{
				PUMP_DEBUG;
				LLChainInfo::conditionals_t::iterator it;
				it = (*run_chain).mDescriptors.begin();
				LLChainInfo::conditionals_t::iterator end;
				end = (*run_chain).mDescriptors.end();
				S32 client_id = 0;
				signal_client_t::iterator signal;
				for(; it != end; ++it)
				{
					PUMP_DEBUG;
					client_id = *((S32*)((*it).second.client_data));
					signal = signalled_client.find(client_id);
					if (signal == not_signalled) continue;
					static const apr_int16_t POLL_CHAIN_ERROR =
						APR_POLLHUP | APR_POLLNVAL | APR_POLLERR;
					const apr_pollfd_t* poll = &(poll_fd[(*signal).second]);
					if(poll->rtnevents & POLL_CHAIN_ERROR)
					{
						// Potential error condition has been
						// returned. If HUP was one of them, we pass
						// that as the error even though there may be
						// more. If there are in fact more errors,
						// we'll just wait for that detection until
						// the next pump() cycle to catch it so that
						// the logic here gets no more strained than
						// it already is.
						LLIOPipe::EStatus error_status;
						if(poll->rtnevents & APR_POLLHUP)
							error_status = LLIOPipe::STATUS_LOST_CONNECTION;
						else error_status = LLIOPipe::STATUS_ERROR;
						// If the chain handled the error itself, leave
						// it in place and stop scanning its
						// descriptors (process_this_chain stays false).
						if(handleChainError(*run_chain, error_status)) break;
						ll_debug_poll_fd("Removing pipe", poll);
						llwarns << "Removing pipe "
							<< (*run_chain).mChainLinks[0].mPipe
							<< " '"
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
							<< typeid(
								*((*run_chain).mChainLinks[0].mPipe)).name()
#endif
							<< "' because: "
							<< events_2_string(poll->rtnevents)
							<< llendl;
						// Setting mHead to end() marks the chain for
						// removal by the cleanup block below.
						(*run_chain).mHead = (*run_chain).mChainLinks.end();
						break;
					}

					// at least 1 fd got signalled, and there were no
					// errors. That means we process this chain.
					process_this_chain = true;
					break;
				}
			}
		}
		if(process_this_chain)
		{
			PUMP_DEBUG;
			// Lazily initialize the chain head on first processing.
			if(!((*run_chain).mInit))
			{
				(*run_chain).mHead = (*run_chain).mChainLinks.begin();
				(*run_chain).mInit = true;
			}
			PUMP_DEBUG;
			processChain(*run_chain);
		}
		PUMP_DEBUG;
		if((*run_chain).mHead == (*run_chain).mChainLinks.end())
		{
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
			lldebugs << "Removing chain "
					 << (*run_chain).mChainLinks[0].mPipe
					 << " '"
					 << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
					 << "' because we reached the end." << llendl;
#else
//			lldebugs << "Removing chain " << (*run_chain).mChainLinks[0].mPipe
//					 << " because we reached the end." << llendl;
#endif
			PUMP_DEBUG;
			// This chain is done. Clean up any allocated memory and
			// erase the chain info.
			std::for_each(
				(*run_chain).mDescriptors.begin(),
				(*run_chain).mDescriptors.end(),
				ll_delete_apr_pollset_fd_client_data());
			run_chain = mRunningChains.erase(run_chain);

			// *NOTE: may not always need to rebuild the pollset.
			mRebuildPollset = true;
		}
		else
		{
			PUMP_DEBUG;
			// this chain needs more processing - just go to the next
			// chain.
			++run_chain;
		}
	}
	PUMP_DEBUG;
	// null out the chain
	mCurrentChain = mRunningChains.end();
	END_PUMP_DEBUG;
}
// Pump the I/O system once.
//
// poll_timeout is in microseconds.
//
// One pass: run pending runners, migrate pending chains into the
// running set (under mChainsMutex when threading is enabled), poll the
// APR pollset, then give each unlocked running chain a chance to run.
// Expired chains are errored or retired, signalled or unconditional
// chains are run via processChain(), and chains whose head reached the
// end of their links are cleaned up and erased.
void LLPumpIO::pump(const S32& poll_timeout)
{
	LLMemType m1(LLMemType::MTYPE_IO_PUMP);
	LLFastTimer t1(LLFastTimer::FTM_PUMP);
	//llinfos << "LLPumpIO::pump()" << llendl;

	// Run any pending runners.
	mRunner.run();

	// We need to move all of the pending heads over to the running
	// chains.
	PUMP_DEBUG;
	// if(true) exists only to open a scope for the lock below.
	if(true)
	{
#if LL_THREADS_APR
		LLScopedLock lock(mChainsMutex);
#endif
		// bail if this pump is paused.
		if(PAUSING == mState)
		{
			mState = PAUSED;
		}
		if(PAUSED == mState)
		{
			return;
		}

		PUMP_DEBUG;
		// Move the pending chains over to the running chains
		if(!mPendingChains.empty())
		{
			PUMP_DEBUG;
			//lldebugs << "Pushing " << mPendingChains.size() << "." << llendl;
			std::copy(
				mPendingChains.begin(),
				mPendingChains.end(),
				std::back_insert_iterator<running_chains_t>(mRunningChains));
			mPendingChains.clear();
			PUMP_DEBUG;
		}

		// Clear any locks. This needs to be done here so that we do
		// not clash during a call to clearLock().
		if(!mClearLocks.empty())
		{
			PUMP_DEBUG;
			running_chains_t::iterator it = mRunningChains.begin();
			running_chains_t::iterator end = mRunningChains.end();
			std::set<S32>::iterator not_cleared = mClearLocks.end();
			for(; it != end; ++it)
			{
				// A chain whose lock id was queued for clearing is
				// unlocked by zeroing mLock.
				if((*it).mLock && mClearLocks.find((*it).mLock) != not_cleared)
				{
					(*it).mLock = 0;
				}
			}
			PUMP_DEBUG;
			mClearLocks.clear();
		}
	}

	PUMP_DEBUG;
	// rebuild the pollset if necessary
	if(mRebuildPollset)
	{
		PUMP_DEBUG;
		rebuildPollset();
		mRebuildPollset = false;
	}

	// Poll based on the last known pollset
	// *FIX: may want to pass in a poll timeout so it works correctly
	// in single and multi threaded processes.
	PUMP_DEBUG;
	// Set of client ids whose descriptors were signalled this cycle;
	// unlike the map variant of this routine, the poll_fd index (and
	// thus rtnevents) is not retained.
	typedef std::set<S32> signal_client_t;
	signal_client_t signalled_client;
	if(mPollset)
	{
		PUMP_DEBUG;
		//llinfos << "polling" << llendl;
		S32 count = 0;
		S32 client_id = 0;
		const apr_pollfd_t* poll_fd = NULL;
		// NOTE(review): the apr_status_t result is discarded;
		// presumably count is 0 on timeout/failure -- confirm against
		// the APR documentation.
		apr_pollset_poll(mPollset, poll_timeout, &count, &poll_fd);
		PUMP_DEBUG;
		for(S32 i = 0; i < count; ++i)
		{
			client_id = *((S32*)poll_fd[i].client_data);
			signalled_client.insert(client_id);
		}
		PUMP_DEBUG;
	}

	PUMP_DEBUG;
	// set up for a check to see if each one was signalled
	signal_client_t::iterator not_signalled = signalled_client.end();

	// Process everything as appropriate
	//lldebugs << "Running chain count: " << mRunningChains.size() << llendl;
	running_chains_t::iterator run_chain = mRunningChains.begin();
	bool process_this_chain = false;
	// No increment in the for statement: erase() supplies the next
	// iterator when a chain is retired.
	for(; run_chain != mRunningChains.end(); )
	{
		PUMP_DEBUG;
		// First, recover or retire chains whose expiration timer fired.
		if((*run_chain).mInit
		   && (*run_chain).mTimer.getStarted()
		   && (*run_chain).mTimer.hasExpired())
		{
			PUMP_DEBUG;
			if(handleChainError(*run_chain, LLIOPipe::STATUS_EXPIRED))
			{
				// the pipe probably handled the error. If the handler
				// forgot to reset the expiration then we need to do
				// that here.
				if((*run_chain).mTimer.getStarted()
				   && (*run_chain).mTimer.hasExpired())
				{
					PUMP_DEBUG;
					llinfos << "Error handler forgot to reset timeout. "
							<< "Resetting to " << DEFAULT_CHAIN_EXPIRY_SECS
							<< " seconds." << llendl;
					(*run_chain).setTimeoutSeconds(DEFAULT_CHAIN_EXPIRY_SECS);
				}
			}
			else
			{
				PUMP_DEBUG;
				// it timed out and no one handled it, so we need to
				// retire the chain
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
				lldebugs << "Removing chain "
						 << (*run_chain).mChainLinks[0].mPipe
						 << " '"
						 << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
						 << "' because it timed out." << llendl;
#else
//				lldebugs << "Removing chain "
//						 << (*run_chain).mChainLinks[0].mPipe
//						 << " because we reached the end."
//						 << llendl;
#endif
				run_chain = mRunningChains.erase(run_chain);
				continue;
			}
		}
		PUMP_DEBUG;
		// Locked chains are skipped entirely this cycle.
		if((*run_chain).mLock)
		{
			++run_chain;
			continue;
		}
		PUMP_DEBUG;
		mCurrentChain = run_chain;
		if((*run_chain).mDescriptors.empty())
		{
			// if there are no conditionals, just process this chain.
			process_this_chain = true;
			//lldebugs << "no conditionals - processing" << llendl;
		}
		else
		{
			PUMP_DEBUG;
			//lldebugs << "checking conditionals" << llendl;
			// Check if this run chain was signalled. If any file
			// descriptor is ready for something, then go ahead and
			// process this chain.
			process_this_chain = false;
			if(!signalled_client.empty())
			{
				PUMP_DEBUG;
				LLChainInfo::conditionals_t::iterator it;
				it = (*run_chain).mDescriptors.begin();
				LLChainInfo::conditionals_t::iterator end;
				end = (*run_chain).mDescriptors.end();
				S32 client_id = 0;
				for(; it != end; ++it)
				{
					PUMP_DEBUG;
					client_id = *((S32*)((*it).second.client_data));
					// Any one signalled descriptor is enough to
					// process the whole chain.
					if(signalled_client.find(client_id) != not_signalled)
					{
						process_this_chain = true;
						break;
					}
					//llinfos << "no fd ready for this one." << llendl;
				}
			}
		}
		if(process_this_chain)
		{
			PUMP_DEBUG;
			// Lazily initialize the chain head on first processing.
			if(!((*run_chain).mInit))
			{
				(*run_chain).mHead = (*run_chain).mChainLinks.begin();
				(*run_chain).mInit = true;
			}
			PUMP_DEBUG;
			processChain(*run_chain);
		}
		PUMP_DEBUG;
		if((*run_chain).mHead == (*run_chain).mChainLinks.end())
		{
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
			lldebugs << "Removing chain "
					 << (*run_chain).mChainLinks[0].mPipe
					 << " '"
					 << typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
					 << "' because we reached the end." << llendl;
#else
//			lldebugs << "Removing chain " << (*run_chain).mChainLinks[0].mPipe
//					 << " because we reached the end." << llendl;
#endif
			PUMP_DEBUG;
			// This chain is done. Clean up any allocated memory and
			// erase the chain info.
			std::for_each(
				(*run_chain).mDescriptors.begin(),
				(*run_chain).mDescriptors.end(),
				ll_delete_apr_pollset_fd_client_data());
			run_chain = mRunningChains.erase(run_chain);

			// *NOTE: may not always need to rebuild the pollset.
			mRebuildPollset = true;
		}
		else
		{
			PUMP_DEBUG;
			// this chain needs more processing - just go to the next
			// chain.
			++run_chain;
		}
	}
	PUMP_DEBUG;
	// null out the chain
	mCurrentChain = current_chain_t();
	END_PUMP_DEBUG;
}