Example #1
    uint64_t GetDefault() const override {
        auto c = GetContainer();

        if (c->IsRoot()) {
            struct sysinfo si;
            int ret = sysinfo(&si);
            if (ret)
                return -1;

            return si.uptime;
        }

        // we started recording raw start/death time since porto v1.15;
        // in case we updated from old version, return zero
        if (!c->Prop->Get<uint64_t>(P_RAW_START_TIME))
            c->Prop->Set<uint64_t>(P_RAW_START_TIME, GetCurrentTimeMs());

        if (!c->Prop->Get<uint64_t>(P_RAW_DEATH_TIME))
            c->Prop->Set<uint64_t>(P_RAW_DEATH_TIME, GetCurrentTimeMs());

        if (c->GetState() == EContainerState::Dead)
            return (c->Prop->Get<uint64_t>(P_RAW_DEATH_TIME) -
                    c->Prop->Get<uint64_t>(P_RAW_START_TIME)) / 1000;
        else
            return (GetCurrentTimeMs() -
                    c->Prop->Get<uint64_t>(P_RAW_START_TIME)) / 1000;
    }
Example #2
    TUintMap GetDefault() const override {
        TUintMap m;

        m["spawned"] = Statistics->Spawned;
        m["errors"] = Statistics->Errors;
        m["warnings"] = Statistics->Warns;
        m["master_uptime"] = (GetCurrentTimeMs() - Statistics->MasterStarted) / 1000;
        m["slave_uptime"] = (GetCurrentTimeMs() - Statistics->SlaveStarted) / 1000;
        m["queued_statuses"] = Statistics->QueuedStatuses;
        m["queued_events"] = Statistics->QueuedEvents;
        m["created"] = Statistics->Created;
        m["remove_dead"] = Statistics->RemoveDead;
        m["slave_timeout_ms"] = Statistics->SlaveTimeoutMs;
        m["rotated"] = Statistics->Rotated;
        m["restore_failed"] = Statistics->RestoreFailed;
        m["started"] = Statistics->Started;
        m["running"] = GetContainer()->GetRunningChildren();
        uint64_t usage = 0;
        auto cg = MemorySubsystem.Cgroup(PORTO_DAEMON_CGROUP);
        TError error = MemorySubsystem.Usage(cg, usage);
        if (error)
            L_ERR() << "Can't get memory usage of portod" << std::endl;
        m["memory_usage_mb"] = usage / 1024 / 1024;
        m["epoll_sources"] = Statistics->EpollSources;
        m["containers"] = Statistics->Containers;
        m["volumes"] = Statistics->Volumes;
        m["clients"] = Statistics->Clients;

        return m;
    }
void Stopwatch::stop()
{
    if (mStop == 0)
    {
        mStop = GetCurrentTimeMs();
    }
}
void Stopwatch::start()
{
    if (mStart == 0)
    {
        mStart = GetCurrentTimeMs();
    }
}
Example #5
    bool Handle(const TEvent &event) override {
        if (event.DueMs <= GetCurrentTimeMs()) {
            TContainer::Event(event);
            return true;
        }

        return false;
    }
Example #6
void TClient::CloseConnection() {
    if (Fd >= 0) {
        ConnectionTime = GetCurrentTimeMs() - ConnectionTime;
        if (config().log().verbose())
            L() << "Client " << Fd << " disconnected : " << *this
                << " : " << ConnectionTime << " ms" <<  std::endl;
        close(Fd);
        Fd = -1;
    }
}
Example #7
// Freezer
TError TFreezerSubsystem::WaitState(TCgroup &cg, const std::string &state) const {
    uint64_t deadline = GetCurrentTimeMs() + config().daemon().freezer_wait_timeout_s() * 1000;
    std::string cur;
    TError error;

    do {
        error = cg.Get("freezer.state", cur);
        if (error || StringTrim(cur) == state)
            return error;
    } while (!WaitDeadline(deadline));

    return TError(EError::Unknown, "Freezer " + cg.Name + " timeout waiting " + state);
}
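
The WaitDeadline() helper used in the loop above is not part of this excerpt. A minimal sketch of what such a poller could look like, assuming a plain sleep-and-check approach (the name matches the call above, but the body and the 100 ms poll interval are illustrative, not porto's actual implementation):

#include <algorithm>
#include <cstdint>
#include <unistd.h>

// Hypothetical helper: sleep briefly and report whether the deadline has passed,
// so the caller can re-read freezer.state between calls.
static bool WaitDeadline(uint64_t deadlineMs, uint64_t pollMs = 100) {
    uint64_t now = GetCurrentTimeMs();                 // assumed available, as in the examples
    if (now >= deadlineMs)
        return true;                                   // deadline reached: stop waiting
    usleep(std::min(pollMs, deadlineMs - now) * 1000); // sleep at most until the deadline
    return false;                                      // keep polling
}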
Example #8
    void Wait(TScopedLock &lock) override {
        if (!Valid)
            return;

        Statistics->QueuedEvents = Queue.size();

        if (Queue.size()) {
            auto now = GetCurrentTimeMs();
            if (Top().DueMs <= now)
                return;
            auto timeout = Top().DueMs - now;
            Statistics->SlaveTimeoutMs = timeout;
            Cv.wait_for(lock, std::chrono::milliseconds(timeout));
        } else {
            Statistics->SlaveTimeoutMs = 0;
            TWorker::Wait(lock);
        }
    }
UInt64 Stopwatch::elapsedMs() const
{
    if (mStart != 0)
    {
        if (mStop != 0)
        {
            return mStop - mStart;
        }
        else
        {
            return GetCurrentTimeMs() - mStart;
        }
    }
    else
    {
        return 0;
    }

}
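
Together with start() and stop() above and restart() further down, elapsedMs() forms a small wall-clock timer on top of GetCurrentTimeMs(). A hedged usage sketch, assuming a Stopwatch class with exactly these four members (doSomeWork() is a placeholder, not part of the excerpts):

Stopwatch sw;
sw.start();                  // records mStart once; later calls are ignored while running
doSomeWork();                // placeholder for the code being timed
sw.stop();                   // records mStop once
UInt64 ms = sw.elapsedMs();  // mStop - mStart here, or "now - mStart" if never stopped
sw.restart();                // fresh mStart, mStop cleared (see restart() below)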
Example #10
void TClient::CloseConnection() {
    TScopedLock lock(Mutex);

    if (Fd >= 0) {
        EpollLoop->RemoveSource(Fd);
        ConnectionTime = GetCurrentTimeMs() - ConnectionTime;
        if (Verbose)
            L() << "Client disconnected: " << *this
                << " : " << ConnectionTime << " ms" <<  std::endl;
        close(Fd);
        Fd = -1;
    }

    for (auto &weakCt: WeakContainers) {
        auto container = weakCt.lock();
        if (container)
            container->DestroyWeak();
    }
    WeakContainers.clear();
}
Example #11
    void Run()
	{
		break_ = false;

		// prepare the window events which we use to wake up on incoming data
		// we use this instead of select() primarily to support the AsyncBreak() 
		// mechanism.

		std::vector<HANDLE> events( socketListeners_.size() + 1, 0 );
		int j=0;
		for( std::vector< std::pair< PacketListener*, UdpSocket* > >::iterator i = socketListeners_.begin();
				i != socketListeners_.end(); ++i, ++j ){

			HANDLE event = CreateEvent( NULL, FALSE, FALSE, NULL );
			WSAEventSelect( i->second->impl_->Socket(), event, FD_READ ); // note that this makes the socket non-blocking which is why we can safely call ReceiveFrom() on all sockets below
			events[j] = event;
		}


		events[ socketListeners_.size() ] = breakEvent_; // last event in the collection is the break event

		
		// configure the timer queue
		double currentTimeMs = GetCurrentTimeMs();

		// expiry time ms, listener
		std::vector< std::pair< double, AttachedTimerListener > > timerQueue_;
		for( std::vector< AttachedTimerListener >::iterator i = timerListeners_.begin();
				i != timerListeners_.end(); ++i )
			timerQueue_.push_back( std::make_pair( currentTimeMs + i->initialDelayMs, *i ) );
		std::sort( timerQueue_.begin(), timerQueue_.end(), CompareScheduledTimerCalls );

		const int MAX_BUFFER_SIZE = 4098;
		char *data = new char[ MAX_BUFFER_SIZE ];
		IpEndpointName remoteEndpoint;

		while( !break_ ){

			double currentTimeMs = GetCurrentTimeMs();

            DWORD waitTime = INFINITE;
            if( !timerQueue_.empty() ){

                waitTime = (DWORD)( timerQueue_.front().first >= currentTimeMs
                            ? timerQueue_.front().first - currentTimeMs
                            : 0 );
            }

			DWORD waitResult = WaitForMultipleObjects( (DWORD)socketListeners_.size() + 1, &events[0], FALSE, waitTime );
			if( break_ )
				break;

			if( waitResult != WAIT_TIMEOUT ){
				for( int i = waitResult - WAIT_OBJECT_0; i < (int)socketListeners_.size(); ++i ){
					int size = socketListeners_[i].second->ReceiveFrom( remoteEndpoint, data, MAX_BUFFER_SIZE );
					if( size > 0 ){
						socketListeners_[i].first->ProcessPacket( data, size, remoteEndpoint );
						if( break_ )
							break;
					}
				}
			}

			// execute any expired timers
			currentTimeMs = GetCurrentTimeMs();
			bool resort = false;
			for( std::vector< std::pair< double, AttachedTimerListener > >::iterator i = timerQueue_.begin();
					i != timerQueue_.end() && i->first <= currentTimeMs; ++i ){

				i->second.listener->TimerExpired();
				if( break_ )
					break;

				i->first += i->second.periodMs;
				resort = true;
			}
			if( resort )
				std::sort( timerQueue_.begin(), timerQueue_.end(), CompareScheduledTimerCalls );
		}

		delete [] data;

		// free events
		j = 0;
		for( std::vector< std::pair< PacketListener*, UdpSocket* > >::iterator i = socketListeners_.begin();
				i != socketListeners_.end(); ++i, ++j ){

			WSAEventSelect( i->second->impl_->Socket(), events[j], 0 ); // remove association between socket and event
			CloseHandle( events[j] );
			unsigned long enableNonblocking = 0;
			ioctlsocket( i->second->impl_->Socket(), FIONBIO, &enableNonblocking );  // make the socket blocking again
		}
	}
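
The comment at the top of Run() mentions an AsyncBreak() mechanism that is not part of this excerpt; on this Windows variant it presumably just sets the break flag and signals the extra breakEvent_ handle at the end of the wait array. A hedged sketch of what that call could look like:

// Hypothetical sketch: wake WaitForMultipleObjects() in the loop above from another thread.
void AsynchronousBreak()
{
    break_ = true;
    SetEvent( breakEvent_ );    // last entry in the events array checked by Run()
}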
Example #12
    void Run()
	{
		break_ = false;

		// configure the master fd_set for select()

		fd_set masterfds, tempfds;
		FD_ZERO( &masterfds );
		FD_ZERO( &tempfds );
		
		// in addition to listening to the inbound sockets we
		// also listen to the asynchronous break pipe, so that AsynchronousBreak()
		// can break us out of select() from another thread.
		FD_SET( breakPipe_[0], &masterfds );
		int fdmax = breakPipe_[0];		

		for( std::vector< std::pair< PacketListener*, UdpSocket* > >::iterator i = socketListeners_.begin();
				i != socketListeners_.end(); ++i ){

			if( fdmax < i->second->impl_->Socket() )
				fdmax = i->second->impl_->Socket();
			FD_SET( i->second->impl_->Socket(), &masterfds );
		}


		// configure the timer queue
		double currentTimeMs = GetCurrentTimeMs();

		// expiry time ms, listener
		std::vector< std::pair< double, AttachedTimerListener > > timerQueue_;
		for( std::vector< AttachedTimerListener >::iterator i = timerListeners_.begin();
				i != timerListeners_.end(); ++i )
			timerQueue_.push_back( std::make_pair( currentTimeMs + i->initialDelayMs, *i ) );
		std::sort( timerQueue_.begin(), timerQueue_.end(), CompareScheduledTimerCalls );

		const int MAX_BUFFER_SIZE = 4098;
		char *data = new char[ MAX_BUFFER_SIZE ];
		IpEndpointName remoteEndpoint;

		struct timeval timeout;

		while( !break_ ){
			tempfds = masterfds;

			struct timeval *timeoutPtr = 0;
			if( !timerQueue_.empty() ){
				double timeoutMs = timerQueue_.front().first - GetCurrentTimeMs();
				if( timeoutMs < 0 )
					timeoutMs = 0;
			
				// 1000000 microseconds in a second
				timeout.tv_sec = (long)(timeoutMs * .001);
				timeout.tv_usec = (long)((timeoutMs - (timeout.tv_sec * 1000)) * 1000);
				timeoutPtr = &timeout;
			}

			if( select( fdmax + 1, &tempfds, 0, 0, timeoutPtr ) < 0 && errno != EINTR ){
   				if (!break_) throw std::runtime_error("select failed\n");
				else break;
			}

			if ( FD_ISSET( breakPipe_[0], &tempfds ) ){
				// clear pending data from the asynchronous break pipe
				char c;
				ssize_t ret; 
				ret = read( breakPipe_[0], &c, 1 );
			}
			
			if( break_ )
				break;

			for( std::vector< std::pair< PacketListener*, UdpSocket* > >::iterator i = socketListeners_.begin();
					i != socketListeners_.end(); ++i ){

				if( FD_ISSET( i->second->impl_->Socket(), &tempfds ) ){

					int size = i->second->ReceiveFrom( remoteEndpoint, data, MAX_BUFFER_SIZE );
					if( size > 0 ){
						i->first->ProcessPacket( data, size, remoteEndpoint );
						if( break_ )
							break;
					}
				}
			}

			// execute any expired timers
			currentTimeMs = GetCurrentTimeMs();
			bool resort = false;
			for( std::vector< std::pair< double, AttachedTimerListener > >::iterator i = timerQueue_.begin();
					i != timerQueue_.end() && i->first <= currentTimeMs; ++i ){

				i->second.listener->TimerExpired();
				if( break_ )
					break;

				i->first += i->second.periodMs;
				resort = true;
			}
			if( resort )
				std::sort( timerQueue_.begin(), timerQueue_.end(), CompareScheduledTimerCalls );
		}

		delete [] data;
	}
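
As in the Windows variant, Run() drains the asynchronous break pipe but the writer side is not shown. A hedged sketch of what the POSIX break call could look like (any single byte is enough to make select() return):

// Hypothetical sketch: wake the select() loop above from another thread.
void AsynchronousBreak()
{
    break_ = true;
    char c = 0;
    ssize_t ret = write( breakPipe_[1], &c, 1 );  // Run() reads and discards this byte
    (void) ret;
}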
Example #13
// Returns time in seconds.
int64 Timer::GetCurrentTime()
{
	return GetCurrentTimeMs() * 0.001;
}
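
Because the return type is int64, the double product GetCurrentTimeMs() * 0.001 is truncated, so for non-negative millisecond values this is effectively integer division by 1000. A sketch of the purely integral form (the function name is illustrative, not from the original source):

// Hypothetical equivalent without the floating-point round-trip.
int64 Timer::GetCurrentTimeSeconds()
{
	return GetCurrentTimeMs() / 1000;
}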
Example #14
void TEventQueue::Add(uint64_t timeoutMs, const TEvent &e) {
    TEvent copy = e;
    copy.DueMs = GetCurrentTimeMs() + timeoutMs;
    Worker->Push(copy);
}
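
Add() turns a relative timeout into an absolute DueMs, which Handle() in Example #5 and Wait() in Example #8 then compare against GetCurrentTimeMs(). A minimal sketch of the same pattern detached from porto's types (GetCurrentTimeMs() is assumed available, as in the excerpts):

#include <cstdint>

struct PendingEvent {
    uint64_t DueMs;   // absolute deadline in milliseconds
};

// Enqueue time: convert a relative timeout into an absolute deadline.
PendingEvent Schedule(uint64_t timeoutMs) {
    return PendingEvent{ GetCurrentTimeMs() + timeoutMs };
}

// Dispatch time: fire only once the deadline has passed.
bool IsDue(const PendingEvent &e) {
    return e.DueMs <= GetCurrentTimeMs();
}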
Example #15
uint64_t TClient::GetRequestTimeMs() {
    return GetCurrentTimeMs() - RequestStartMs;
}
Example #16
void TClient::BeginRequest() {
    RequestStartMs = GetCurrentTimeMs();
}
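
BeginRequest() stores the start timestamp that GetRequestTimeMs() in Example #15 later subtracts from the current time. A hedged usage sketch (HandleRequest() is a placeholder for the actual request processing):

client->BeginRequest();              // RequestStartMs = now
HandleRequest(*client);              // placeholder: parse and execute the request
L() << "request finished in " << client->GetRequestTimeMs() << " ms" << std::endl;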
Example #17
TClient::TClient(std::shared_ptr<TEpollLoop> loop, int fd) : TEpollSource(loop, fd) {
    ConnectionTime = GetCurrentTimeMs();
    Statistics->Clients++;
}
/**
 * @param isFirstPacket   first packet in a frame
 * @param isMarketPacket  last packet in a frame
 */
PJ_DEF(int) jitter_buffer_insert_packet(Jitter_Buffer *jitter_buffer,
                                        pj_uint16_t seq, pj_uint32_t ts,
                                        pjmedia_frame_type frame_type,
                                        char *payload, int size,
                                        pj_bool_t isFirstPacket,
                                        pj_bool_t isMarketPacket) {
    int ret = 0;
    if(pj_mutex_lock(jitter_buffer->jb_mutex) != PJ_SUCCESS) {
        return -1;
    }
    pj_ssize_t current;
    JTPacket *packet = NULL;
    Frame_Buffer *frame;
    // is running
    if(!jitter_buffer->running) {
        ret = -2;
        goto ON_RET;
    }
    // sanity check
    if(ts == 0 || (frame_type != PJMEDIA_FRAME_TYPE_EMPTY && size == 0)) {
        ret = -3;
        goto ON_RET;
    }
    if(!jitter_buffer->first_packet) {
        isFirstPacket = PJ_TRUE;
        jitter_buffer->first_packet = PJ_TRUE;
    }
    //clean old frames
    //CleanOldFrames(jitter_buffer);
    //update jitter
    current = GetCurrentTimeMs();
    if(frame_type != PJMEDIA_FRAME_TYPE_EMPTY && frame_type != PJMEDIA_FRAME_TYPE_NONE) {
        if(jitter_buffer->waiting_for_completed_frame.timestamp == ts) {
            jitter_buffer->waiting_for_completed_frame.frame_size += size;
            jitter_buffer->waiting_for_completed_frame.latest_packet_timestamp = current;
        } else if(jitter_buffer->waiting_for_completed_frame.latest_packet_timestamp > 0 && 
            current - jitter_buffer->waiting_for_completed_frame.latest_packet_timestamp  > 2000) {
                //too old
                UpdateJitterEstimatorForWaitingFrame(jitter_buffer, &jitter_buffer->waiting_for_completed_frame);
                jitter_buffer->waiting_for_completed_frame.frame_size = 0;
                jitter_buffer->waiting_for_completed_frame.latest_packet_timestamp = -1;
                jitter_buffer->waiting_for_completed_frame.timestamp = 0;
        }
    }
    //create packet
    packet = NULL;
    if(jt_packet_create(&packet, &jitter_buffer->packet_alloc, seq, ts, frame_type, 
                    isFirstPacket, isMarketPacket, payload, size) != 0) {
        if(packet)
            list_alloc_insert(packet, &jitter_buffer->packet_alloc);
        ret = -1;
        goto ON_RET;
    }
    //GetCurrentTimeInLocal(packet->time_in_jb, 60);
    if(!isFirstPacket) {
        //is first packet in frame
        isFirstPacket = first_packet_in_frame(jitter_buffer, packet);
        packet->isFirst = isFirstPacket;
    }
    if(isMarketPacket) {
        //check if next packet is first packet
        Frame_Buffer *next_frame = findFrame(&jitter_buffer->frameList, packet->ts, '>');
        if(next_frame != NULL) {
            JTPacket * first_packet = next_frame->session_info.packetList.next;
            if(packet != &next_frame->session_info.packetList) {
                if(InSequence(packet->seq, first_packet->seq)) {
                    first_packet->isFirst = PJ_TRUE;
                }
            }
        }
    }
    //clean old frames
    CleanOldFrames(jitter_buffer);   
    // is old packet 
    if(decode_state_isOldPacket(packet, &jitter_buffer->decode_state)) {
            decode_state_updateOldPacket(packet, &jitter_buffer->decode_state);
            list_alloc_insert(packet, &jitter_buffer->packet_alloc);
            ret = -1;
            goto ON_RET;
    }
    
    // find or alloc a frame
    frame = findFrame(&jitter_buffer->frameList, packet->ts, '=');
    if(frame == NULL) { //alloc one
        //if(jitter_buffer->number_of_frames > jitter_buffer->max_number_of_frames) {
        if(jitter_buffer->number_of_frames > jitter_buffer->max_number_of_frames) {
            //clean old frames at least one
            Frame_Buffer *oldestFrame = jitter_buffer->frameList.next;
            if(oldestFrame != &jitter_buffer->frameList)
                RealseFrame(jitter_buffer, oldestFrame);
        }
        list_alloc_alloc(Frame_Buffer, &frame, &jitter_buffer->frame_alloc);
        //init
        frame_buffer_init(frame, &jitter_buffer->packet_alloc);
    }
    //insert packet into the frame
    ret = frame_buffer_insert_packet(frame, packet);
    if(ret > 0) {
        list_alloc_insert(packet, &jitter_buffer->packet_alloc);
        frame_buffer_reset(frame);
        list_alloc_insert(frame, &jitter_buffer->frame_alloc);
        ret = -1;
        goto ON_RET;
    } else if (ret < 0) {
        frame_buffer_reset(frame);
        list_alloc_insert(frame, &jitter_buffer->frame_alloc);
        ret = -1;
        goto ON_RET;
    } else {
        event_set(jitter_buffer->packet_event);
        if(packet->isRetrans)
            frame_buffer_IncrementNackCount(frame);
    }
    
    //insert frame to frame list
    if(findFrame(&jitter_buffer->frameList, frame->ts, '=') == NULL) {
        Frame_Buffer *prev_frame = findFrame(&jitter_buffer->frameList, frame->ts, '<');
        prev_frame = (prev_frame == NULL ? &jitter_buffer->frameList : prev_frame);
        pj_list_insert_after(prev_frame, frame);
        event_set(jitter_buffer->frame_event);
        jitter_buffer->number_of_frames++;
    }
    ON_RET:
    pj_mutex_unlock(jitter_buffer->jb_mutex);
    return ret;
    
}
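
A hedged sketch of how this routine might be fed from an RTP receive path; jb, the sequence number, timestamp, frame type, payload pointer and the two flags below are assumptions about the caller, not part of the excerpt above:

/* Hypothetical caller: push one received RTP packet into the jitter buffer. */
int status = jitter_buffer_insert_packet(jb,
                                         rtp_seq,           /* pj_uint16_t sequence number */
                                         rtp_timestamp,     /* pj_uint32_t RTP timestamp   */
                                         frame_type,        /* pjmedia_frame_type          */
                                         payload, payload_len,
                                         is_first_in_frame,
                                         marker_bit);       /* last packet of the frame    */
if (status < 0) {
    /* rejected: buffer not running, sanity check failed, old packet, or allocation failure */
}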
void Stopwatch::restart()
{
    mStart = GetCurrentTimeMs();
    mStop = 0;
}
Example #20
void TClient::StartRequest() {
    RequestStartMs = GetCurrentTimeMs();
    PORTO_ASSERT(CurrentClient == nullptr);
    CurrentClient = this;
}