Example #1
Timer* get_bound_timer() {
    if(timers().find(bound_timer_id_) == timers().end()) {
        return NULL;
    }

    return timers()[bound_timer_id_].get();
}
Example #2
void ktiBindTimer(KTIuint name) {
    if(timers().find(name) == timers().end()) {
        // unknown name: clear the binding and bail out instead of falling through
        bound_timer_id_ = 0;
        return;
    }

    bound_timer_id_ = name;
}
Example #3
void ktiGenTimers(KTIsizei n, KTIuint* names) {
    for(KTIuint i = 0; i < n; ++i) {
        KTIuint new_id = get_next_timer_id();
        timers()[new_id].reset(new Timer());
        names[i] = new_id;
    }
}
Example #4
TEST_F(TimerPerfTest, PostAndRunTimers) {
  const int numIterations = 10000;
  Vector<std::unique_ptr<Timer<TimerPerfTest>>> timers(numIterations);
  for (int i = 0; i < numIterations; i++) {
    timers[i].reset(new Timer<TimerPerfTest>(this, &TimerPerfTest::nopTask));
  }

  Timer<TimerPerfTest> measureRunStart(this,
                                       &TimerPerfTest::recordStartRunTime);
  Timer<TimerPerfTest> measureRunEnd(this, &TimerPerfTest::recordEndRunTime);

  measureRunStart.startOneShot(0.0, BLINK_FROM_HERE);
  base::ThreadTicks postStart = base::ThreadTicks::Now();
  for (int i = 0; i < numIterations; i++) {
    timers[i]->startOneShot(0.0, BLINK_FROM_HERE);
  }
  base::ThreadTicks postEnd = base::ThreadTicks::Now();
  measureRunEnd.startOneShot(0.0, BLINK_FROM_HERE);

  testing::enterRunLoop();

  double postingTime = (postEnd - postStart).InMicroseconds();
  double postingTimeUsPerCall =
      postingTime / static_cast<double>(numIterations);
  LOG(INFO) << "TimerBase::startOneShot cost (us/call) " << postingTimeUsPerCall
            << " (total " << postingTime << " us)";
  LOG(INFO) << "Time to run " << numIterations << " trivial tasks (us) "
            << (m_runEnd - m_runStart).InMicroseconds();
}
Example #5
// [[Rcpp::export]]
List detail_make_index_parallel( DataFrame data, CharacterVector by ){
    int n = data.nrows() ;
    Timers timers(n);
    
    Visitors visitors(data, by) ;  
    
    IndexMaker indexer(visitors) ;
    TimedReducer<IndexMaker, Timer, tbb::mutex, tbb::mutex::scoped_lock> timed_indexer(indexer, timers) ;
    
    parallelReduce(0, n, timed_indexer, 100) ;
    return timed_indexer.get() ;
    
}
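The TimedReducer passed to parallelReduce() above appears to follow the usual RcppParallel reduce contract: a worker object that processes a chunk of indices in operator() and merges partial results in join(). Below is a minimal sketch of that contract using a hypothetical Sum reducer; only parallelReduce(begin, end, worker, grainSize) comes from the snippet, everything else is illustrative.

// [[Rcpp::depends(RcppParallel)]]
#include <Rcpp.h>
#include <RcppParallel.h>
#include <numeric>

struct Sum : public RcppParallel::Worker {
    const RcppParallel::RVector<double> input; // read-only view of the data
    double value;                              // this worker's partial result

    Sum(const Rcpp::NumericVector input) : input(input), value(0) {}
    // split constructor: a fresh partial result for a new worker
    Sum(const Sum& other, RcppParallel::Split) : input(other.input), value(0) {}

    // process the chunk [begin, end)
    void operator()(std::size_t begin, std::size_t end) {
        value += std::accumulate(input.begin() + begin, input.begin() + end, 0.0);
    }

    // fold another worker's partial result into this one
    void join(const Sum& rhs) { value += rhs.value; }
};

// [[Rcpp::export]]
double parallel_sum(Rcpp::NumericVector x) {
    Sum sum(x);
    RcppParallel::parallelReduce(0, x.length(), sum, 100);
    return sum.value;
}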
Example #6
void initialize(void){
     SYSTEMConfig(80000000, SYS_CFG_ALL); // sets up peripheral and clock configuration
     INTEnableSystemMultiVectoredInt();
     INTEnableInterrupts(); // enable interrupts
     delay();
     timers();
     delay();
     PWM();
     delay();
     UART();
     delay();
     beginLIDARdecoder(returned_data, &buffer_five);
}
Example #7
gboolean GtkTileMapToolkit::timeoutCallback(CallbackInfo* info)
{
   
   GdkCBSync sync;
   uint32 timer_id = info->context_id;
   
   TimerMap_t& timers(info->area->m_timers);

   TimerMap_t::iterator itr = timers.find(timer_id);
   
   if(itr != timers.end()) {
      TileMapTimerListener* listener = itr->second.listener;
      timers.erase(timer_id);
      listener->timerExpired(timer_id);
   }

   delete info;
   
   return false;
}
Example #8
//-------------------------------------------------------------------------------------
void ServerApp::handleTimers()
{
	AUTO_SCOPED_PROFILE("callTimers");
	timers().process(g_kbetime);
}
Example #9
void ktiDeleteTimers(KTIsizei n, const KTIuint* names) {
    for(KTIuint i = 0; i < n; ++i) {
        timers().erase(names[i]);
    }
}
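Taken together, examples #1, #2, #3 and #9 describe a small GL-style object API: generate timer names, bind one, look up the bound timer, and delete names when done. A minimal usage sketch under that assumption follows; the calling context and any error handling are hypothetical, only the three entry points shown above are taken from the snippets.

KTIuint id = 0;
ktiGenTimers(1, &id);      // allocate one timer and receive its name (example #3)
ktiBindTimer(id);          // make it the bound timer (example #2)
// ... the bound timer is now reachable via get_bound_timer() (example #1) ...
ktiDeleteTimers(1, &id);   // release the timer when finished (example #9)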
Example #10
//-------------------------------------------------------------------------------------
void ClientApp::handleTimers()
{
	timers().process(g_kbetime);
}
Example #11
//version 0 uses the utransfer
//deprecated function, use etransfer instead.
void host_monitor_transfer_0(){
#if 0	
	int portnumber = UDP_HOST_MONITOR_SRV_PORT_NUMBER;
	CPPTimers timers(4);
	timers.setTimer(0,3000);
	timers.trigger(0);
	char recv_buffer[MSG_BUFF_SIZE];
	char send_buffer[MSG_BUFF_SIZE];
	int recv_size = 0;
	int send_size = 0;
	//todo need to study the global static member 
	UTransfer *transfer = UTransfer::get_instance();
	//unsigned short port = portnumber;
	
	transfer->init_tcp(portnumber);
	list<upoll_t>src;
	list<upoll_t>dst;
	list<upoll_t>::iterator it;
	USocket*tcp_listener = transfer->get_tcp_listener();
	upoll_t ls_poll;
	ls_poll.events = UPOLL_READ_T;
	ls_poll.pointer = NULL;
	ls_poll.usock = tcp_listener;
	src.push_back(ls_poll);
	map<USocket*,upoll_t> m_tcp_map;
	struct timeval base_tm = {0,100};
	struct timeval wait_tm;
	signal(SIGPIPE,SIG_IGN);
	fprintf(stderr,"debug host_monitor_transfer_0 portnumber:%d\n",portnumber);
	while(true)
	{
		dst.clear();
		src.clear();
		map<USocket*,upoll_t>::iterator mit;
		for(mit = m_tcp_map.begin();mit!=m_tcp_map.end();mit++)
			src.push_back(mit->second);
		src.push_back(ls_poll);
		wait_tm =base_tm;
		int res = transfer->select(dst,src,&wait_tm);
		if(res>0)
		{
			fprintf(stderr,"debug transfer->select\n");
			for(it=dst.begin();it!=dst.end();it++)
			{
				upoll_t up = *it;
				if(up.usock==tcp_listener)
				{
					if(up.events & UPOLL_READ_T)
					{
						USocket *sc = transfer->accept(tcp_listener);
						if(sc)
						{
							upoll_t new_up;
							new_up.pointer = NULL;
							new_up.usock = sc;
							new_up.events = UPOLL_READ_T;
							m_tcp_map[sc] = new_up;
							cerr<<"debug host_monitor accept socket."<<endl;
						}
					}
				}//up.usock == tcp_listener
				else if(up.events & UPOLL_WRITE_T){
					it->usock->send((char *)(it->pointer), MSG_BUFF_SIZE, NULL);
					free(it->pointer);
					it->pointer = NULL;
					transfer->destroy_socket(it->usock);
					m_tcp_map.erase(it->usock);
				}
				else if(up.events & UPOLL_READ_T)
				{
					int recv_size = it->usock->recv(recv_buffer,MSG_BUFF_SIZE,NULL);
					if(recv_size>0){
						char *send_buffer = (char *)malloc(sizeof(char)*MSG_BUFF_SIZE);
						upoll_t write_up;
						write_up.usock = it->usock;
						write_up.pointer = send_buffer;
						write_up.events = UPOLL_WRITE_T;
						m_tcp_map[it->usock] = write_up;
						int type  = *((int *)recv_buffer);
						int src   = *((int *)recv_buffer+1);
						int dist  = *((int *)recv_buffer+2);
						int qh_id = *((int *)recv_buffer+3);
						if(qh_id>=1){
							*((int *)send_buffer) = type;
							*((int *)send_buffer+1) = local_id;
							*((int *)send_buffer+2) = src;
							*((int *)send_buffer+3) = qh_id;
							for(int i = 0;i<num_hosts;i++){
								*((int *)send_buffer+MSG_HEAD_SIZE+i) = pcstat[i];
							}
						}//if(qh_id >= 1)
						if(qh_id == 0){
							*((int *)send_buffer) = type;
							*((int *)send_buffer+1) = local_id;
							*((int *)send_buffer+2) = src;
							*((int *)send_buffer+3) = qh_id;
							pthread_mutex_lock(&hostq_lock);
							for(int i = 0;i<num_hosts;i++){
								epbs_host_info *phost = (epbs_host_info *)(send_buffer+
									sizeof(int)*MSG_HEAD_SIZE+sizeof(epbs_host_info)*i);
								phost->host_stat = hostq[i].host_stat;
								phost->host_id = i+1;
								strncpy(phost->ip,hostq[i].ip,32);
							}//for
							pthread_mutex_unlock(&hostq_lock);
						}//if(qh_id == 0)
					}//if(recv_size>0)
				}//else if(up.events & UPOLL_READ_T)
			}
		}
	}
#endif
}
Example #12
//-------------------------------------------------------------------------------------
void ServerApp::handleTimers()
{
    timers().process(time_);
}
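Examples #8, #10 and #12 all drive timers from the application's main loop with timers().process(currentTime). As a rough illustration of what such a process() call typically does, here is a generic sketch of a time-ordered callback queue; it is not KBEngine's actual TimersT implementation, and the SimpleTimerQueue name is hypothetical.

#include <cstdint>
#include <functional>
#include <map>

class SimpleTimerQueue {
public:
    // schedule `cb` to fire once `when` (in ticks) has been reached
    void add(uint64_t when, std::function<void()> cb) {
        queue_.insert({when, std::move(cb)});
    }

    // called periodically from the main loop, e.g. process(g_kbetime)
    void process(uint64_t now) {
        while (!queue_.empty() && queue_.begin()->first <= now) {
            std::function<void()> cb = queue_.begin()->second;
            queue_.erase(queue_.begin());
            cb(); // fire after erasing so the callback may safely reschedule itself
        }
    }

private:
    // expired entries sit at the front because the multimap is ordered by deadline
    std::multimap<uint64_t, std::function<void()>> queue_;
};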