Example 1
S32 LLPumpIO::setLock()
{
	// *NOTE: I do not think it is necessary to acquire a mutex here
	// since this should only be called during pump(), and should
	// only change the running chain. Any other use of this method is
	// incorrect usage. If it becomes necessary to acquire a lock
	// here, take the lock in this method and delegate the real work
	// to a protected implementation, and sleepChain() should acquire
	// the same lock while calling that same protected implementation
	// so that it locks the runner at the same time.

	// If no chain is running, return failure.
	if(current_chain_t() == mCurrentChain)
	{
		return 0;
	}

	// deal with wrap.
	if(++mNextLock <= 0)
	{
		mNextLock = 1;
	}

	// set the lock
	(*mCurrentChain).mLock = mNextLock;
	return mNextLock;
}
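A minimal usage sketch for setLock(), assuming a pipe that is being driven by pump() and holds a pump pointer; mPendingLockKey is a hypothetical member, and clearLock() (referenced in pump() below) is assumed to take the key returned by setLock().

// Hypothetical pipe code running inside pump(): freeze the current
// chain until an asynchronous operation completes.
S32 key = pump->setLock();        // returns 0 if no chain is running
if(key)
{
	mPendingLockKey = key;        // hypothetical member storing the key
}

// ... later, from the completion handler ...
// clearLock() is assumed to take the key returned by setLock(); pump()
// releases the lock on its next cycle via mClearLocks.
pump->clearLock(mPendingLockKey);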
Example 2
bool LLPumpIO::setTimeoutSeconds(F32 timeout)
{
	// If no chain is running, return failure.
	if(current_chain_t() == mCurrentChain)
	{
		return false;
	}
	(*mCurrentChain).setTimeoutSeconds(timeout);
	return true;
}
Example 3
bool LLPumpIO::copyCurrentLinkInfo(links_t& links) const
{
	LLMemType m1(LLMemType::MTYPE_IO_PUMP);
	if(current_chain_t() == mCurrentChain)
	{
		return false;
	}
	std::copy(
		(*mCurrentChain).mChainLinks.begin(),
		(*mCurrentChain).mChainLinks.end(),
		std::back_insert_iterator<links_t>(links));
	return true;
}
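A minimal usage sketch for copyCurrentLinkInfo(), assuming a caller running inside pump() with access to the pump pointer; the chain-cloning scenario and the addChain() style call mentioned in the comment are assumptions, not shown in the listing above.

// Hypothetical caller running inside pump(): snapshot the links of the
// currently running chain.
LLPumpIO::links_t links;
if(pump->copyCurrentLinkInfo(links))
{
	// links now holds copies of the running chain's pipe links. A
	// caller could, for example, hand them to a fresh chain (e.g. via
	// an addChain() style call, assumed here and not shown above).
}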
Example 4
void LLPumpIO::adjustTimeoutSeconds(F32 delta)
{
	// If no chain is running, bail
	if(current_chain_t() == mCurrentChain) return;
	(*mCurrentChain).adjustTimeoutSeconds(delta);
}
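A minimal sketch combining the two timeout calls above, assuming a pipe with access to the pump pointer while its chain is being processed; the 30-second budget and 5-second extension are illustrative values only.

// Hypothetical pipe code: give the currently running chain a fresh
// timeout before starting a slow operation, then push the expiry out
// as the operation keeps making progress.
if(pump->setTimeoutSeconds(30.f))
{
	// ... kick off the slow operation ...
}

// ... later, while data is still arriving ...
pump->adjustTimeoutSeconds(5.f);  // extend the current expiry by 5 seconds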
Example 5
// poll_timeout is in microseconds
void LLPumpIO::pump(const S32& poll_timeout)
{
	LLMemType m1(LLMemType::MTYPE_IO_PUMP);
	LLFastTimer t1(LLFastTimer::FTM_PUMP);
	//llinfos << "LLPumpIO::pump()" << llendl;

	// Run any pending runners.
	mRunner.run();

	// We need to move all of the pending heads over to the running
	// chains.
	PUMP_DEBUG;
	// Scope block: the chains mutex (when enabled below) is held only
	// while pending chains are moved over and locks are cleared.
	if(true)
	{
#if LL_THREADS_APR
		LLScopedLock lock(mChainsMutex);
#endif
		// bail if this pump is paused.
		if(PAUSING == mState)
		{
			mState = PAUSED;
		}
		if(PAUSED == mState)
		{
			return;
		}

		PUMP_DEBUG;
		// Move the pending chains over to the running chains
		if(!mPendingChains.empty())
		{
			PUMP_DEBUG;
			//lldebugs << "Pushing " << mPendingChains.size() << "." << llendl;
			std::copy(
				mPendingChains.begin(),
				mPendingChains.end(),
				std::back_insert_iterator<running_chains_t>(mRunningChains));
			mPendingChains.clear();
			PUMP_DEBUG;
		}

		// Clear any locks. This needs to be done here so that we do
		// not clash during a call to clearLock().
		if(!mClearLocks.empty())
		{
			PUMP_DEBUG;
			running_chains_t::iterator it = mRunningChains.begin();
			running_chains_t::iterator end = mRunningChains.end();
			std::set<S32>::iterator not_cleared = mClearLocks.end();
			for(; it != end; ++it)
			{
				if((*it).mLock && mClearLocks.find((*it).mLock) != not_cleared)
				{
					(*it).mLock = 0;
				}
			}
			PUMP_DEBUG;
			mClearLocks.clear();
		}
	}

	PUMP_DEBUG;
	// rebuild the pollset if necessary
	if(mRebuildPollset)
	{
		PUMP_DEBUG;
		rebuildPollset();
		mRebuildPollset = false;
	}

	// Poll based on the last known pollset
	// *TODO: may want to pass in a poll timeout so it works correctly
	// in single and multi threaded processes.
	PUMP_DEBUG;
	typedef std::map<S32, S32> signal_client_t;
	signal_client_t signalled_client;
	const apr_pollfd_t* poll_fd = NULL;
	if(mPollset)
	{
		PUMP_DEBUG;
		//llinfos << "polling" << llendl;
		S32 count = 0;
		S32 client_id = 0;
		apr_pollset_poll(mPollset, poll_timeout, &count, &poll_fd);
		PUMP_DEBUG;
		for(S32 ii = 0; ii < count; ++ii)
		{
			ll_debug_poll_fd("Signalled pipe", &poll_fd[ii]);
			client_id = *((S32*)poll_fd[ii].client_data);
			signalled_client[client_id] = ii;
		}
		PUMP_DEBUG;
	}

	PUMP_DEBUG;
	// set up for a check to see if each one was signalled
	signal_client_t::iterator not_signalled = signalled_client.end();

	// Process everything as appropriate
	//lldebugs << "Running chain count: " << mRunningChains.size() << llendl;
	running_chains_t::iterator run_chain = mRunningChains.begin();
	bool process_this_chain = false;
	for(; run_chain != mRunningChains.end(); )
	{
		PUMP_DEBUG;
		if((*run_chain).mInit
		   && (*run_chain).mTimer.getStarted()
		   && (*run_chain).mTimer.hasExpired())
		{
			PUMP_DEBUG;
			if(handleChainError(*run_chain, LLIOPipe::STATUS_EXPIRED))
			{
				// the pipe probably handled the error. If the handler
				// forgot to reset the expiration then we need to do
				// that here.
				if((*run_chain).mTimer.getStarted()
				   && (*run_chain).mTimer.hasExpired())
				{
					PUMP_DEBUG;
					llinfos << "Error handler forgot to reset timeout. "
							<< "Resetting to " << DEFAULT_CHAIN_EXPIRY_SECS
							<< " seconds." << llendl;
					(*run_chain).setTimeoutSeconds(DEFAULT_CHAIN_EXPIRY_SECS);
				}
			}
			else
			{
				PUMP_DEBUG;
				// it timed out and no one handled it, so we need to
				// retire the chain
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
				lldebugs << "Removing chain "
						<< (*run_chain).mChainLinks[0].mPipe
						<< " '"
						<< typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
						<< "' because it timed out." << llendl;
#else
//				lldebugs << "Removing chain "
//						<< (*run_chain).mChainLinks[0].mPipe
//						<< " because we reached the end." << llendl;
#endif
				run_chain = mRunningChains.erase(run_chain);
				continue;
			}
		}
		PUMP_DEBUG;
		if((*run_chain).mLock)
		{
			++run_chain;
			continue;
		}
		PUMP_DEBUG;
		mCurrentChain = run_chain;
		if((*run_chain).mDescriptors.empty())
		{
			// if there are no conditionals, just process this chain.
			process_this_chain = true;
			//lldebugs << "no conditionals - processing" << llendl;
		}
		else
		{
			PUMP_DEBUG;
			//lldebugs << "checking conditionals" << llendl;
			// Check if this run chain was signalled. If any file
			// descriptor is ready for something, then go ahead and
			// process this chain.
			process_this_chain = false;
			if(!signalled_client.empty())
			{
				PUMP_DEBUG;
				LLChainInfo::conditionals_t::iterator it;
				it = (*run_chain).mDescriptors.begin();
				LLChainInfo::conditionals_t::iterator end;
				end = (*run_chain).mDescriptors.end();
				S32 client_id = 0;
				signal_client_t::iterator signal;
				for(; it != end; ++it)
				{
					PUMP_DEBUG;
					client_id = *((S32*)((*it).second.client_data));
					signal = signalled_client.find(client_id);
					if (signal == not_signalled) continue;
					static const apr_int16_t POLL_CHAIN_ERROR =
						APR_POLLHUP | APR_POLLNVAL | APR_POLLERR;
					const apr_pollfd_t* poll = &(poll_fd[(*signal).second]);
					if(poll->rtnevents & POLL_CHAIN_ERROR)
					{
						// A potential error condition has been
						// returned. If HUP was one of them, we pass
						// that as the error even though there may be
						// more. If there are in fact more errors, we
						// let the next pump() cycle detect them so
						// that the logic here gets no more strained
						// than it already is.
						LLIOPipe::EStatus error_status;
						if(poll->rtnevents & APR_POLLHUP)
							error_status = LLIOPipe::STATUS_LOST_CONNECTION;
						else
							error_status = LLIOPipe::STATUS_ERROR;
						if(handleChainError(*run_chain, error_status)) break;
						ll_debug_poll_fd("Removing pipe", poll);
						llwarns << "Removing pipe "
							<< (*run_chain).mChainLinks[0].mPipe
							<< " '"
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
							<< typeid(
								*((*run_chain).mChainLinks[0].mPipe)).name()
#endif
							<< "' because: "
							<< events_2_string(poll->rtnevents)
							<< llendl;
						(*run_chain).mHead = (*run_chain).mChainLinks.end();
						break;
					}

					// at least 1 fd got signalled, and there were no
					// errors. That means we process this chain.
					process_this_chain = true;
					break;
				}
			}
		}
		if(process_this_chain)
		{
			PUMP_DEBUG;
			if(!((*run_chain).mInit))
			{
				(*run_chain).mHead = (*run_chain).mChainLinks.begin();
				(*run_chain).mInit = true;
			}
			PUMP_DEBUG;
			processChain(*run_chain);
		}

		PUMP_DEBUG;
		if((*run_chain).mHead == (*run_chain).mChainLinks.end())
		{
#if LL_DEBUG_PIPE_TYPE_IN_PUMP
			lldebugs << "Removing chain " << (*run_chain).mChainLinks[0].mPipe
					<< " '"
					<< typeid(*((*run_chain).mChainLinks[0].mPipe)).name()
					<< "' because we reached the end." << llendl;
#else
//			lldebugs << "Removing chain " << (*run_chain).mChainLinks[0].mPipe
//					<< " because we reached the end." << llendl;
#endif

			PUMP_DEBUG;
			// This chain is done. Clean up any allocated memory and
			// erase the chain info.
			std::for_each(
				(*run_chain).mDescriptors.begin(),
				(*run_chain).mDescriptors.end(),
				ll_delete_apr_pollset_fd_client_data());
			run_chain = mRunningChains.erase(run_chain);

			// *NOTE: may not always need to rebuild the pollset.
			mRebuildPollset = true;
		}
		else
		{
			PUMP_DEBUG;
			// this chain needs more processing - just go to the next
			// chain.
			++run_chain;
		}
	}

	PUMP_DEBUG;
	// null out the chain
	mCurrentChain = current_chain_t();
	END_PUMP_DEBUG;
}
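A minimal driver-loop sketch for pump(), assuming a single-threaded host that owns the pump; the 100 ms figure, the loop condition, and the callback() companion call are assumptions for illustration.

// Hypothetical main loop: drive the pump once per iteration. The
// argument is a poll timeout in microseconds, as noted above pump().
const S32 POLL_TIMEOUT_USEC = 100 * 1000;  // 100 ms
while(keep_running)                        // hypothetical loop condition
{
	pump->pump(POLL_TIMEOUT_USEC);     // poll descriptors and run chains
	pump->callback();                  // assumed companion call to run
	                                   // queued chain callbacks
}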