void ServerObjectService::onTimer()
    {
        boost::uint32_t nowMs = getCurrentTimeMs();
        {
            Lock lock(mMutex);
            // Sweep the map, removing objects that are unreferenced and whose
            // idle timeout has elapsed.
            ServerObjectMap::iterator iter = mServerObjectMap.begin();
            while ( iter != mServerObjectMap.end() )
            {
                ServerObjectHolder & holder = iter->second;
                if ( holder.mUseCount == 0
                    && (nowMs - holder.mLastTouchMs > holder.mTimeoutMs) )
                {
                    mServerObjectMap.erase(iter++);
                }
                else
                {
                    iter++;
                }
            }
            mLastHarvestMs = nowMs;
        }

        // Clean up HTTP sessions as well.
        mpRcfServer->harvestHttpSessions();
    }
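// Aside: the erase(iter++) idiom above is the pre-C++11 way to erase from a
// std::map while iterating. erase() invalidates only the erased iterator, and
// the post-increment hands erase() a copy while iter has already advanced. A
// minimal standalone sketch of the same sweep (the map type and the zero-count
// staleness test are illustrative, not part of the code above):

#include <map>
#include <string>

typedef std::map<std::string, unsigned int> CountMap;

void sweepZeroEntries(CountMap & m)
{
    CountMap::iterator iter = m.begin();
    while (iter != m.end())
    {
        if (iter->second == 0)
        {
            m.erase(iter++);    // erase a copy; iter itself has moved on
        }
        else
        {
            ++iter;
        }
    }
}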
        template<typename T>
        boost::shared_ptr<T> getServerObjectImpl(
            const std::string & objectKey, 
            boost::uint32_t timeoutMs, 
            bool createIfDoesntExist)
        {
            typedef boost::shared_ptr<T> TPtr;

            Lock lock(mMutex);

            ServerObjectMap::iterator iter = mServerObjectMap.find(objectKey);
            if (iter != mServerObjectMap.end())
            {
                ServerObjectHolder & holder = iter->second;
                boost::any & a = holder.mServerObject;
                TPtr * ptPtr = boost::any_cast<TPtr>(&a);
                RCF_ASSERT(ptPtr);
                TPtr tPtr = *ptPtr;
                T * pt = tPtr.get();
                RCF_ASSERT(pt);

                // Return shared_ptr with custom deleter.
                holder.mLastTouchMs = getCurrentTimeMs();
                RCF_ASSERT(holder.mUseCount >= 0);
                ++holder.mUseCount;
                TPtr ptr(pt, boost::bind(&ServerObjectService::customDeleter, this, objectKey, _1));
                return ptr;
            }
            else if (createIfDoesntExist)
            {
                T * pt = new T();
                TPtr tPtr(pt);
                mServerObjectMap[objectKey] = ServerObjectHolder(boost::any(tPtr), timeoutMs);
                ServerObjectHolder & holder = mServerObjectMap[objectKey];

                // Return shared_ptr with custom deleter.
                holder.mLastTouchMs = getCurrentTimeMs();
                RCF_ASSERT(holder.mUseCount >= 0);
                ++holder.mUseCount;
                TPtr ptr(pt, boost::bind(&ServerObjectService::customDeleter, this, objectKey, _1));
                return ptr;
            }
            else
            {
                return TPtr();
            }
        }
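// A hedged usage sketch for the method above: assuming a public wrapper along
// the lines of RcfServer::getServerObject<T>(key, timeoutMs) forwarding to
// getServerObjectImpl() (the wrapper name and the MySessionState type are
// assumptions for illustration, not taken from the code above):
//
//     {
//         boost::shared_ptr<MySessionState> statePtr =
//             server.getServerObject<MySessionState>("session-123", 60*1000);
//         // mUseCount is now nonzero, so onTimer() cannot harvest the object.
//         statePtr->update();
//     }
//     // statePtr destroyed: customDeleter() (below) decrements mUseCount and
//     // refreshes mLastTouchMs; the object is harvested after timeoutMs idle.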
void jaspResults::startProgressbar(int expectedTicks, int timeBetweenUpdatesInMs)
{
	_progressbarExpectedTicks		= expectedTicks;
	_progressbarBetweenUpdatesTime	= timeBetweenUpdatesInMs;
	_progressbarLastUpdateTime		= getCurrentTimeMs();
	_progressbarTicks				= 0;

	response["progress"]			= 0;

	send();
}
    // Invoked when the last shared_ptr returned by getServerObjectImpl() is
    // destroyed: decrements the object's use count and refreshes its touch
    // time, making it eligible for harvesting once its timeout elapses.
    void ServerObjectService::customDeleter(const std::string & objectKey, void * pt)
    {
        RCF_UNUSED_VARIABLE(pt);

        Lock lock(mMutex);

        ServerObjectMap::iterator iter = mServerObjectMap.find(objectKey);
        RCF_ASSERT(iter != mServerObjectMap.end());
        ServerObjectHolder & holder = iter->second;
        RCF_ASSERT(holder.mUseCount > 0);
        --holder.mUseCount;
        holder.mLastTouchMs = getCurrentTimeMs();
    }
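// The deleter pattern in isolation: a standalone sketch (illustrative types,
// not RCF code) showing how a shared_ptr's custom deleter can maintain an
// external use count instead of deleting the object it points to.

#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <cassert>

struct Registry
{
    int mUseCount;
    Registry() : mUseCount(0) {}

    // Plays the role of ServerObjectService::customDeleter().
    void release(void * /*pt*/)
    {
        --mUseCount;
    }
};

int main()
{
    Registry registry;
    int object = 42;    // stands in for the owned server object

    {
        ++registry.mUseCount;
        boost::shared_ptr<int> ptr(
            &object,
            boost::bind(&Registry::release, &registry, _1));
        assert(registry.mUseCount == 1);
    }   // ptr destroyed: release() runs; the object itself is NOT deleted

    assert(registry.mUseCount == 0);
    return 0;
}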
	// Push onto the bounded queue; if full, evict (and delete) the oldest
	// element first. Waiting consumers are woken via _conditionVar.
	void push(T* data) {
		boost::mutex::scoped_lock lock(_mutex);
		if (_queue.size() >= size) {
			T* pDataToDelete = _queue.front().data;
			_queue.pop();
			if(pDataToDelete)
			{
				delete pDataToDelete;
			}
		}
		Element e = { getCurrentTimeMs(), data };
		_queue.push(e);
		lock.unlock();
		_conditionVar.notify_one();
	}
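	// A hedged sketch of the consumer this push() implies: a blocking pop()
	// that waits on _conditionVar until an element arrives. The pop() itself
	// is an assumption, not part of the original class:
	T* pop() {
		boost::mutex::scoped_lock lock(_mutex);
		while (_queue.empty()) {
			// wait() releases _mutex while blocked, re-acquires it on wakeup
			_conditionVar.wait(lock);
		}
		T* data = _queue.front().data;
		_queue.pop();
		return data;
	}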
void jaspResults::progressbarTick()
{
	checkForAnalysisChanged();

	_progressbarTicks++;

	int progress			= std::lround(100.0f * ((float)_progressbarTicks) / ((float)_progressbarExpectedTicks));
	progress				= std::min(100, std::max(progress, 0));
	response["progress"]	= progress;

	int curTime = getCurrentTimeMs();
	if(curTime - _progressbarLastUpdateTime > _progressbarBetweenUpdatesTime || progress == 100)
	{
		send();
		_progressbarLastUpdateTime = curTime;
	}
}
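// A hedged usage sketch tying startProgressbar() and progressbarTick()
// together: start with the expected tick count, then tick once per unit of
// work (the workItems vector and doOneItem() are illustrative assumptions):
//
//	startProgressbar((int)workItems.size(), 500);
//	for (size_t i = 0; i < workItems.size(); i++)
//	{
//		doOneItem(workItems[i]);
//		progressbarTick();  // sends at most one update per 500 ms, plus the 100% update
//	}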
    std::size_t Win32NamedPipeClientTransport::implWriteAsync(
        const std::vector<ByteBuffer> &byteBuffers)
    {
        mAsyncMode = true;

        RecursiveLock lock(mOverlappedPtr->mMutex);

        mOverlappedPtr->mOpType = OverlappedAmi::Write;

        // Construct an OVERLAPPED-derived object to contain the handler.
        ASIO_NS::windows::overlapped_ptr overlapped(
            mSocketPtr->get_io_service(), 
            AmiIoHandler(mOverlappedPtr));

        const ByteBuffer & byteBuffer = byteBuffers.front();

        DWORD dwBytesWritten = 0;

        bool writeOk = false;

        BOOL ok = WriteFile(
            mhPipe,
            byteBuffer.getPtr(),
            static_cast<DWORD>(byteBuffer.getLength()),
            &dwBytesWritten,
            overlapped.get());

        DWORD dwErr = GetLastError();

        if (!ok &&  (
                    dwErr == ERROR_IO_PENDING 
                ||  dwErr == WSA_IO_PENDING 
                ||  dwErr == ERROR_MORE_DATA))
        {
            writeOk = true;
        }
        else if (dwBytesWritten)
        {
            writeOk = true;
        }

        if (writeOk)
        {
            // The operation was successfully initiated, so ownership of the
            // OVERLAPPED-derived object has passed to the io_service.
            overlapped.release();

            // Set timer.
            boost::uint32_t nowMs = getCurrentTimeMs();
            boost::uint32_t timeoutMs = mEndTimeMs - nowMs;
            mAsioTimerPtr->expires_from_now( boost::posix_time::milliseconds(timeoutMs) );

            mAsioTimerPtr->async_wait( AmiTimerHandler(mOverlappedPtr) );
        }
        else
        {
            // The operation completed immediately, so a completion notification needs
            // to be posted. When complete() is called, ownership of the OVERLAPPED-
            // derived object passes to the io_service.

            AsioErrorCode ec(
                dwErr,
                ASIO_NS::error::get_system_category());

            overlapped.complete(ec, 0);
        }

        return 0;
    }
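// The overlapped_ptr ownership rule used above, in isolation: after a
// successful or pending initiation, release() transfers the OVERLAPPED to the
// io_service, which invokes the handler on completion; on immediate failure,
// complete() posts the handler manually. A hedged Windows-only sketch (the
// handle h, buffer, and handler are illustrative assumptions):
//
//     void onWriteDone(const AsioErrorCode & ec, std::size_t bytesWritten);
//
//     ASIO_NS::windows::overlapped_ptr overlapped(ioService, &onWriteDone);
//     BOOL ok = WriteFile(h, buf, bufLen, NULL, overlapped.get());
//     DWORD dwErr = GetLastError();
//     if (ok || dwErr == ERROR_IO_PENDING)
//     {
//         overlapped.release();       // io_service now owns the OVERLAPPED
//     }
//     else
//     {
//         AsioErrorCode ec(dwErr, ASIO_NS::error::get_system_category());
//         overlapped.complete(ec, 0); // completed immediately; post the handler
//     }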
 /**
  * @returns the time remaining until expiration (in milliseconds)
  * @note If the timer has already expired, zero is returned.
  */
 inline uint64_t getTimeToExpirationMs(void) const
 {
     uint64_t now = getCurrentTimeMs();
     const bool isExpired = (now >= mTargetMs);
     return isExpired ? 0 : (mTargetMs - now);
 }
 /// Resets the timer from this point of time using the previous timeout interval
 inline void reset(void) { mTargetMs = getCurrentTimeMs() + mIntervalMs; }
 /**
  * Resets the timer from this point of time using the new timer value given.
  * @param ms  The new interval, in milliseconds, after which the timer should next expire.
  */
 inline void reset(uint64_t ms) { mIntervalMs = ms; mTargetMs = getCurrentTimeMs() + ms; }
 /// @returns true if the timer has expired; a timer with a zero interval never expires
 inline bool expired(void) const
 { return (0 == mIntervalMs) ? false : ( getCurrentTimeMs() >= mTargetMs); }
 /**
  * @returns the time elapsed since expiration (in milliseconds)
  * @note If the timer hasn't expired yet, zero is returned.
  */
 inline uint64_t getTimeSinceExpirationMs(void) const
 {
     uint64_t now = getCurrentTimeMs();
     const bool isExpired = (now >= mTargetMs);
     return isExpired ? (now - mTargetMs) : 0;
 }
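// A hedged usage sketch, assuming these methods belong to a timer class
// (call it Timer) whose constructor takes the interval and initializes
// mIntervalMs and mTargetMs accordingly (sendKeepAlive() and sleepMs()
// are illustrative assumptions):
//
//     Timer keepAlive(30 * 1000);             // expires 30 s from now
//     while (running)
//     {
//         if (keepAlive.expired())
//         {
//             sendKeepAlive();                // illustrative work
//             keepAlive.reset();              // rearm with the same interval
//         }
//         sleepMs(keepAlive.getTimeToExpirationMs());
//     }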
    int UdpClientTransport::receive(
        I_ClientTransportCallback &clientStub, 
        ByteBuffer &byteBuffer,
        unsigned int timeoutMs)
    {
        // Try to receive a UDP message from the server within the current timeout.
        RCF_LOG_4()(mSock)(mDestIp.string())(timeoutMs) << "UdpClientTransport - receiving data from socket.";

        RCF_ASSERT(!mAsync);

        unsigned int startTimeMs = getCurrentTimeMs();
        unsigned int endTimeMs = startTimeMs + timeoutMs;

        while (true)
        {
            // Remaining time until the overall deadline (renamed to avoid
            // shadowing the timeoutMs parameter).
            unsigned int remainingMs = generateTimeoutMs(endTimeMs);
            fd_set fdSet;
            FD_ZERO(&fdSet);
            FD_SET( static_cast<SOCKET>(mSock), &fdSet);
            timeval timeout;
            timeout.tv_sec = remainingMs/1000;
            timeout.tv_usec = 1000*(remainingMs%1000);

            int ret = Platform::OS::BsdSockets::select(
                mSock+1,
                &fdSet,
                NULL,
                NULL,
                &timeout);

            int err = Platform::OS::BsdSockets::GetLastError();

            RCF_ASSERT(-1 <= ret && ret <= 1)(ret);
            if (ret == -1)
            {
                Exception e(
                    _RcfError_Socket("select()"),
                    err,
                    RcfSubsystem_Os);

                RCF_THROW(e);
            }   
            else if (ret == 0)
            {
                Exception e( _RcfError_ClientReadTimeout() );
                RCF_THROW(e);
            }
            RCF_ASSERT_EQ(ret , 1);

            if (mReadVecPtr.get() == NULL || !mReadVecPtr.unique())
            {
                mReadVecPtr.reset( new ReallocBuffer());
            }

            // TODO: optimize
            ReallocBuffer &buffer = *mReadVecPtr;
            buffer.resize(4);

            SockAddrStorage fromAddr;
            memset(&fromAddr, 0, sizeof(fromAddr));
            int fromAddrLen = sizeof(fromAddr);

            sockaddr * pDestAddr = NULL;
            Platform::OS::BsdSockets::socklen_t destAddrSize = 0;
            mDestIp.getSockAddr(pDestAddr, destAddrSize);

            int len = Platform::OS::BsdSockets::recvfrom(
                mSock,
                &buffer[0],
                4,
                MSG_PEEK,
                (sockaddr *) &fromAddr,
                &fromAddrLen);

            err = Platform::OS::BsdSockets::GetLastError();

            if (    len == 4 
                ||  (len == -1 && err == Platform::OS::BsdSockets::ERR_EMSGSIZE))
            {
                mFromIp.init( (sockaddr&) fromAddr, fromAddrLen, mDestIp.getType());
                if (mDestIp.matches(mFromIp))
                {
                    boost::uint32_t dataLength = 0;
                    memcpy( &dataLength, &buffer[0], 4);
                    RCF::networkToMachineOrder(&dataLength, 4, 1);

                    if (getMaxMessageLength())
                    {
                        RCF_VERIFY(
                            0 < dataLength && dataLength <= getMaxMessageLength(),
                            Exception(_RcfError_ClientMessageLength(dataLength, getMaxMessageLength())));
                    }

                    buffer.resize(4+dataLength);
                    memset(&fromAddr, 0, sizeof(fromAddr));
                    fromAddrLen = sizeof(fromAddr);

                    len = Platform::OS::BsdSockets::recvfrom(
                        mSock,
                        &buffer[0],
                        dataLength+4,
                        0,
                        (sockaddr *) &fromAddr,
                        &fromAddrLen);

                    if (len == static_cast<int>(dataLength+4))
                    {
                        mLastResponseSize = dataLength+4;
                        mRunningTotalBytesReceived += dataLength+4;

                        byteBuffer = ByteBuffer(
                            &buffer[4],
                            dataLength,
                            4,
                            mReadVecPtr);

                        clientStub.onReceiveCompleted();

                        return 1;
                    }
                }
                else
                {
                    // The packet is not a valid response, but we need to read
                    // it so we can receive more packets.

                    const std::size_t BufferSize = 4096;
                    char Buffer[BufferSize];
                    Platform::OS::BsdSockets::recvfrom(
                        mSock,
                        Buffer,
                        BufferSize,
                        0,
                        NULL,
                        NULL);
                }
            }
            else
            {
                RCF_THROW( Exception( _RcfError_ClientReadFail() ) )(len)(err);
            }
        }
    }
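// The framing above in isolation: each datagram carries a 4-byte length
// prefix in network byte order, which receive() peeks (MSG_PEEK) before
// sizing the real read. A standalone sketch of decoding such a prefix, using
// the standard ntohl() in place of RCF::networkToMachineOrder():

#include <stdint.h>
#include <cassert>
#include <cstring>
#include <arpa/inet.h>  // ntohl(); on Windows include <winsock2.h> instead

uint32_t decodeLengthPrefix(const char * datagram)
{
    uint32_t dataLength = 0;
    std::memcpy(&dataLength, datagram, 4);  // first 4 bytes are the prefix
    return ntohl(dataLength);               // network to host byte order
}

int main()
{
    const char datagram[4] = { 0x00, 0x00, 0x01, 0x00 };  // encodes 256
    assert(decodeLengthPrefix(datagram) == 256);
    return 0;
}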