///////////////////////////////////////////////////////////////////////////////
// Function: int CLogger::Stop()
//
// Description: Stop the worker thread, flush any remaining queued log
// items, and close the log file
//
// Return: int - 0 on success, otherwise failure
//
// Parameters: none
//
///////////////////////////////////////////////////////////////////////////////
int CLogger::Stop()
{
   boost::mutex::scoped_lock l_controlLock( m_controlMutex);
   if ( m_state == LOGGER_RUNNING )
   {
      m_state = LOGGER_STOPPING;

      m_semaphore.notify_one();
      m_thread->join();

      boost::mutex::scoped_lock l_lock( m_queueMutex);

      LogItemSptr l_logItem;
      while ( ! m_queue.empty() )
      {
         l_logItem = m_queue.front();
         m_queue.pop();
         if ( l_logItem )
         {
            OutputLogItem( l_logItem);
         }
      }

      OutputLogItem( FormatedLogItem( LOG_LEVEL_ALWAYS, *this, "Stop", "stopped"));

      if ( m_logfile.is_open())
      {
         m_logfile.close();
      }

      m_state = LOGGER_STOPPED;
   }
   return 0;
}
Example no. 2
	lua_State *State::raw()
	{
		if (!pimpl_)
			return nullptr;
		lock_guard<mutex> l_lock(pimpl_->mutex_);
		return pimpl_->s;
	}
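Note that raw() only holds the lock while reading pimpl_->s; the returned lua_State* escapes the critical section, so every later use of it still needs external synchronization. A minimal sketch of a caller-side guard, assuming a hypothetical StateGuard helper that is not part of the original wrapper:

#include <functional>
#include <mutex>

struct lua_State;   // opaque handle, as in the Lua C API

// Hypothetical helper: runs a callback while the state mutex is held,
// so the raw pointer never outlives the critical section.
class StateGuard
{
public:
	StateGuard( std::mutex& a_mutex, lua_State* a_state )
		: m_mutex( a_mutex ), m_state( a_state ) {}

	void with_state( const std::function< void ( lua_State* ) >& a_fn )
	{
		std::lock_guard< std::mutex > l_lock( m_mutex );
		if ( m_state )
			a_fn( m_state );   // pointer is only visible under the lock
	}

private:
	std::mutex& m_mutex;
	lua_State*  m_state;
};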
Example no. 3
/**
* Offer help to a randomly picked host
*/
void PROCESSOR::offer_help() {
	if(!help_messages
		&& n_idle_processors == n_processors 
		&& !use_abdada_cluster
		) {
			register int i, count = 0,dest;

			l_lock(lock_smp);
			for(i = 0;i < n_processors;i++) {
				if(processors[i]->state == WAIT) 
					count++;
			}
			l_unlock(lock_smp);
			
			if(count == n_processors) {
				while(true) {
					dest = (rand() % n_hosts);
					if((dest != host_id) && (dest != prev_dest)) 
						break;
				}
				ISend(dest,HELP);
				help_messages++;
				prev_dest = dest;
			}
	}
}
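offer_help first reads n_idle_processors without any lock as a cheap filter, then re-counts the WAIT processors under lock_smp before acting. A minimal sketch of that check-then-verify-under-lock pattern, with std::mutex and hypothetical names in place of the engine's globals:

#include <atomic>
#include <mutex>

std::mutex       g_lock_smp;            // hypothetical stand-ins
std::atomic<int> g_idle_count( 0 );
const int        g_n_processors = 4;

bool all_idle()
{
	// Unlocked read: a cheap early-out that may be stale.
	if ( g_idle_count.load() != g_n_processors )
		return false;

	// Verify under the lock before acting on the observation.
	std::lock_guard< std::mutex > l_lock( g_lock_smp );
	return g_idle_count.load() == g_n_processors;
}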
Example no. 4
int run(void) {
    printf("%s: starting...\n", get_instance_name());
    l_lock();
    printf("%s: got lock!\n", get_instance_name());
    l_unlock();
    printf("%s: released\n", get_instance_name());
    return 0;
}
	void CLogger::DoCleanupThread()
	{
		_stopped = true;
		{
			std::unique_lock< std::mutex > l_lock( _mutexThreadEnded );
			_threadEnded.wait( l_lock );
		}
		_logThread.join();
	}
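The wait in DoCleanupThread has no predicate: if the worker thread finishes and calls notify_one before this wait begins, the notification is lost and the wait blocks forever, and a spurious wakeup can release it early. A minimal sketch of the usual fix, assuming a hypothetical _threadDone flag that the original class does not have:

#include <condition_variable>
#include <mutex>

struct LoggerSync
{
	std::mutex              _mutexThreadEnded;
	std::condition_variable _threadEnded;
	bool                    _threadDone = false;   // assumed addition

	void SignalThreadEnded()    // would run at the end of the worker lambda
	{
		std::unique_lock< std::mutex > l_lock( _mutexThreadEnded );
		_threadDone = true;
		_threadEnded.notify_one();
	}

	void WaitForThreadEnded()   // would replace the bare wait above
	{
		std::unique_lock< std::mutex > l_lock( _mutexThreadEnded );
		// The predicate makes the wait immune to lost and spurious wakeups.
		_threadEnded.wait( l_lock, [this]{ return _threadDone; } );
	}
};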
///////////////////////////////////////////////////////////////////////////////
// Function: void CLogger::QueueLogItem(LogItemSptr &a_logItem)
//
// Description: Add a log item to the queue
//
// Return: void
//
// Parameters: LogItemSptr &a_logItem
//
///////////////////////////////////////////////////////////////////////////////
void CLogger::QueueLogItem( LogItemSptr& a_logItem)
{

   {  // scope for lock
      boost::mutex::scoped_lock l_lock( m_queueMutex);
      m_queue.push( a_logItem);
   }
   m_semaphore.notify_one();
}
CMrcpTaskProcessor::~CMrcpTaskProcessor() 
{
	CMrcpTaskProcessorByStringMap::iterator l_pos;

	boost::mutex::scoped_lock l_lock( m_singleton);
	if ((l_pos = m_taskProcessors.find(m_idString)) != m_taskProcessors.end())
	{
		m_taskProcessors.erase(l_pos);
	}
}
	void CLogger::DoFlushQueue( bool display )
	{
		if ( !_queue.empty() )
		{
			MessageQueue queue;

			{
				std::unique_lock< std::mutex > l_lock( _mutexQueue );
				std::swap( queue, _queue );
			}

			_impl->LogMessageQueue( queue, display );
		}
	}
	void CLogger::DoPushMessage( ELogType logLevel, std::wstring const & message )
	{
		if ( logLevel >= _logLevel )
		{
#if !defined( NDEBUG )
			{
				std::unique_lock< std::mutex > lock( _mutex );
				_impl->PrintMessage( logLevel, message );
			}
#endif
			std::unique_lock< std::mutex > l_lock( _mutexQueue );
			_queue.push_back( std::make_unique< SWMessage >( logLevel, message ) );
		}
	}
Example no. 10
///////////////////////////////////////////////////////////////////////////////
// Function: void CLogger::ProcessLogItems()
//
// Description: Pop log items from the log queue and output them
//
// Return: void
//
// Parameters: none
//
///////////////////////////////////////////////////////////////////////////////
void CLogger::ProcessLogItems()
{

   { // scope for lock
      boost::mutex::scoped_lock l_controlLock( m_controlMutex);
      m_state = LOGGER_RUNNING;
   }
 
   OutputLogItem( FormatedLogItem( LOG_LEVEL_ALWAYS, *this, "ProcessLogItems", "started"));

   LogItemSptr l_logItem;

   boost::mutex::scoped_lock l_semaphoreLock( m_semaphoreMutex);

   while( m_state == LOGGER_RUNNING )
   {

      if ( m_queue.empty())
      {
         m_semaphore.wait( l_semaphoreLock);
      }

      {  // scope for lock
         boost::mutex::scoped_lock l_lock( m_queueMutex);
         if ( !m_queue.empty())
         {
            l_logItem = m_queue.front();
            m_queue.pop();
         }
         else
         {
            continue;
         }
      }
      OutputLogItem( l_logItem);

      if ( m_logfile.is_open())
      {
         m_logfile.close();
         m_logfile.open( "/var/snowshore/log/MrcpClientLibrary.log",std::ios_base::app);
      }

   }
 
}
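ProcessLogItems tests m_queue.empty() while holding only m_semaphoreMutex, so a push and notify_one that land between that test and the wait are not seen until the next notification arrives. A minimal single-mutex sketch of the same consumer loop, with one mutex guarding both the queue and the wait (hypothetical names; std::string stands in for the log item type):

#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <string>

std::mutex              g_queueMutex;    // hypothetical stand-ins
std::condition_variable g_queueCond;
std::queue< std::shared_ptr< std::string > > g_queue;
bool                    g_running = true;

void ConsumeLogItems()
{
   std::unique_lock< std::mutex > l_lock( g_queueMutex );
   while ( g_running )
   {
      // One mutex guards both the queue and the wait, so no notification
      // can slip in between the emptiness test and the sleep.
      g_queueCond.wait( l_lock, []{ return !g_queue.empty() || !g_running; } );
      while ( !g_queue.empty() )
      {
         std::shared_ptr< std::string > l_item = g_queue.front();
         g_queue.pop();
         l_lock.unlock();   // drop the lock while doing slow I/O
         // ... write *l_item to the log file here ...
         l_lock.lock();
      }
   }
}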
	void CLogger::DoInitialiseThread()
	{
		_stopped = false;
		_logThread = std::thread( [this]()
		{
			while ( !_stopped )
			{
				DoFlushQueue( true );
				std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
			}

			DoFlushQueue( false );
			{
				std::unique_lock< std::mutex > l_lock( _mutexThreadEnded );
				_threadEnded.notify_one();
			}
		} );
	}
int CMrcpTaskProcessor::QueueTask( MrcpTasks* a_mrcpTask) 
{ 
   Name("QueueTask");
   if ( EventProcessorRunning())
   {
      { // scope for lock
         boost::mutex::scoped_lock l_lock( m_queueMutex);
         m_queue.push( a_mrcpTask);
      }
      m_semaphore.notify_one();
      return MrcpSuccess;
   }
   else
   {
      CLogger::Instance()->Log( LOG_LEVEL_ERROR, *this, "Processor not running");
      return -1;
   }
}
Example no. 13
///////////////////////////////////////////////////////////////////////////////
// Function: int CLogger::Start()
//
// Description: Open the log file and start the logger worker thread
//
// Return: int - 0 on success, otherwise failure
//
// Parameters: none
//
///////////////////////////////////////////////////////////////////////////////
int CLogger::Start()
{
   boost::mutex::scoped_lock l_lock( m_controlMutex);
   if ( m_state == LOGGER_STOPPED )
   {
      m_state = LOGGER_STARTING;
      m_logfile.open( "/var/snowshore/log/MrcpClientLibrary.log",std::ios_base::app);
      if ( m_logfile.is_open())
      {
         m_thread = boost::shared_ptr<boost::thread> ( new boost::thread( boost::bind( &CLogger::ProcessLogItems, this)));
      }
      else
      {
         m_state = LOGGER_STOPPED; // revert so a later Start() can retry
         return -1;
      }
   }
   return 0;
}
////////////////////////////////////////////////////////////////////////// 
//
// Description - Instance
// Input - string - id string identifying the instance
//
// Output - returns pointer to task processor instance
//			Instantiates a task processor class by ID, or returns a pointer
//			to the existing object if it is already instantiated
///////////////////////////////////////////////////////////////////////////
CMrcpTaskProcessor* CMrcpTaskProcessor::Instance(std::string a_idString)
{
	CMrcpTaskProcessorByStringMap::iterator l_pos;
	CMrcpTaskProcessor* l_taskProcessor;

	if ((l_pos = m_taskProcessors.find(a_idString)) == m_taskProcessors.end())
	{
		boost::mutex::scoped_lock l_lock( m_singleton);
		if ((l_pos = m_taskProcessors.find(a_idString)) == m_taskProcessors.end())
		{
			l_taskProcessor = new CMrcpTaskProcessor(a_idString);
			m_taskProcessors.insert(std::make_pair( a_idString, l_taskProcessor));
			if ((l_pos = m_taskProcessors.find(a_idString)) == m_taskProcessors.end())
			{
				std::string l_errorInfo = "Error creating Event Processor for: " + a_idString;
				CLogger::Instance()->Log( LOG_LEVEL_ERROR, l_errorInfo);
				return NULL;
			}
		}
	}
	return l_pos->second;
}
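The first find in Instance runs without m_singleton held, so it can race with a concurrent insert into the same std::map, which is undefined behavior; the third find after the insert is also redundant, because insert reports success itself. A minimal sketch that locks the whole lookup and uses insert's return value, with hypothetical stand-ins for the class members (the cost is that every lookup now serializes, which only matters if lookups vastly outnumber creations):

#include <map>
#include <mutex>
#include <string>
#include <utility>

// Minimal stand-in for CMrcpTaskProcessor.
class TaskProcessor
{
public:
	explicit TaskProcessor( const std::string& a_id ) : m_id( a_id ) {}
private:
	std::string m_id;
};

std::mutex                              g_singletonMutex;   // hypothetical
std::map< std::string, TaskProcessor* > g_taskProcessors;

TaskProcessor* Instance( const std::string& a_idString )
{
	// Holding the lock across the whole lookup removes the unlocked
	// first find, which could otherwise race with a concurrent insert.
	std::lock_guard< std::mutex > l_lock( g_singletonMutex );

	std::map< std::string, TaskProcessor* >::iterator l_pos =
		g_taskProcessors.find( a_idString );
	if ( l_pos == g_taskProcessors.end() )
	{
		l_pos = g_taskProcessors.insert( std::make_pair(
			a_idString, new TaskProcessor( a_idString ) ) ).first;
	}
	return l_pos->second;
}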
Example no. 15
///////////////////////////////////////////////////////////////////////////////
// Function: void ADispatcher::UnregisterForEventDispatch(ATaskHandler *a_eventHandler, long a_event)
//
// Description: Unregister an event handler for an event.  To unregister a 
// task handler for all events, set the event parameter to ALL_EVENTS.
//
// Return: void
//
// Parameters: ATaskHandler *a_eventHandler
//             long a_event
//
///////////////////////////////////////////////////////////////////////////////
void ADispatcher::UnregisterForEventDispatch( ATaskHandler* a_eventHandler, long a_event)
{
    boost::mutex::scoped_lock l_lock( m_mapMutex);

    ATaskHandlerListByEventMap::iterator l_eventHandlerList;
    if ( a_event == ALL_EVENTS)
    {
        for (  l_eventHandlerList = m_eventRegistrations.begin();
               l_eventHandlerList != m_eventRegistrations.end();
               ++l_eventHandlerList)
        {
            ( l_eventHandlerList->second)->erase( a_eventHandler);
        }
    }
    else
    {
        l_eventHandlerList = m_eventRegistrations.find( a_event);
        if ( l_eventHandlerList != m_eventRegistrations.end())
        {
            ( l_eventHandlerList->second)->erase( a_eventHandler);
        }
    }
}
Example no. 16
/**
* Handle messages
*/
void PROCESSOR::handle_message(int source,int message_id) {
	const PSEARCHER psb = processors[0]->searcher;

	/**************************************************
	* SPLIT  - Search from received position
	**************************************************/
	if(message_id == SPLIT) {
		SPLIT_MESSAGE split;
		Recv(source,message_id,&split,sizeof(SPLIT_MESSAGE));
		message_available = 0;

		/*setup board by undoing old moves and making new ones*/
		register int i,score,move,using_pvs;
		if(split.pv_length) {
			for(i = 0;i < split.pv_length && i < psb->ply;i++) {
				if(split.pv[i] != psb->hstack[psb->hply - psb->ply + i].move) 
					break;
			}
			while(psb->ply > i) {
				if(psb->hstack[psb->hply - 1].move) psb->POP_MOVE();
				else psb->POP_NULL();
			}
			for(;i < split.pv_length;i++) {
				if(split.pv[i]) psb->PUSH_MOVE(split.pv[i]);
				else psb->PUSH_NULL();
			}
		} else {
			psb->PUSH_MOVE(split.pv[0]);
		}
		/*reset*/
		SEARCHER::abort_search = 0;
		psb->clear_block();

		/**************************************
		* PVS-search on root node
		*************************************/
		processors[0]->state = GO;

		using_pvs = false;
		psb->pstack->extension = split.extension;
		psb->pstack->reduction = split.reduction;
		psb->pstack->depth = split.depth;
		psb->pstack->alpha = split.alpha;
		psb->pstack->beta = split.beta;
		psb->pstack->node_type = split.node_type;
		psb->pstack->search_state = split.search_state;
		if(psb->pstack->beta != psb->pstack->alpha + 1) {
			psb->pstack->node_type = CUT_NODE;
			psb->pstack->beta = psb->pstack->alpha + 1;
			using_pvs = true;
		}

		/*Search move and re-search if necessary*/
		move = psb->hstack[psb->hply - 1].move;
		while(true) {			
			psb->search();
			if(psb->stop_searcher || SEARCHER::abort_search) {
				move = 0;
				score = 0;
				break;
			}
			score = -psb->pstack->best_score;
			/*research with full depth*/
			if(psb->pstack->reduction
				&& score >= -split.alpha
				) {
					psb->pstack->depth += psb->pstack->reduction;
					psb->pstack->reduction = 0;

					psb->pstack->alpha = split.alpha;
					psb->pstack->beta = split.alpha + 1;
					psb->pstack->node_type = CUT_NODE;
					psb->pstack->search_state = NULL_MOVE;
					continue;
			}
			/*research with full window*/
			if(using_pvs 
				&& score > -split.beta
				&& score < -split.alpha
				) {
					using_pvs = false;

					psb->pstack->alpha = split.alpha;
					psb->pstack->beta = split.beta;
					psb->pstack->node_type = split.node_type;
					psb->pstack->search_state = NULL_MOVE;
					continue;
			}
			break;
		}

		/*undo move: go to previous ply even if search was interrupted*/
		while(psb->ply > psb->stop_ply - 1) {
			if(psb->hstack[psb->hply - 1].move) psb->POP_MOVE();
			else psb->POP_NULL();
		}

		processors[0]->state = WAIT;

		/***********************************************************
		* Send result back and release all helper nodes we acquired
		***********************************************************/
		PROCESSOR::cancel_idle_hosts();

		MERGE_MESSAGE merge;
		merge.nodes = psb->nodes;
		merge.qnodes = psb->qnodes;
		merge.time_check = psb->time_check;
		merge.splits = psb->splits;
		merge.bad_splits = psb->bad_splits;
		merge.egbb_probes = psb->egbb_probes;

		/*pv*/
		merge.master = split.master;
		merge.best_move = move;
		merge.best_score = score;
		merge.pv_length = 0;

		if(move && score > -split.beta && score < -split.alpha) {
			merge.pv[0] = move;
			memcpy(&merge.pv[1],&(psb->pstack + 1)->pv[psb->ply + 1],
				((psb->pstack + 1)->pv_length - psb->ply ) * sizeof(MOVE));
			merge.pv_length = (psb->pstack + 1)->pv_length - psb->ply;
		}
		/*send it*/
		MPI_Request rq;
		ISend(source,PROCESSOR::MERGE,&merge,MERGE_MESSAGE_SIZE(merge),&rq);
		Wait(&rq);
	} else if(message_id == MERGE) {
		/**************************************************
		* MERGE  - Merge result of move at split point
		**************************************************/
		MERGE_MESSAGE merge;
		Recv(source,message_id,&merge,sizeof(MERGE_MESSAGE));
		

		/*update master*/
		PSEARCHER master = (PSEARCHER)merge.master;
		l_lock(master->lock);

		if(merge.best_move && merge.best_score > master->pstack->best_score) {
			master->pstack->best_score = merge.best_score;
			master->pstack->best_move = merge.best_move;
			if(merge.best_score > master->pstack->alpha) {
				if(merge.best_score > master->pstack->beta) {
					master->pstack->flag = LOWER;

					l_unlock(master->lock);
					master->handle_fail_high();
					l_lock(master->lock);
				} else {
					master->pstack->flag = EXACT;
					master->pstack->alpha = merge.best_score;

					memcpy(&master->pstack->pv[master->ply],&merge.pv[0],
							merge.pv_length * sizeof(MOVE));
					master->pstack->pv_length = merge.pv_length + master->ply;
				}
			}
		}

		/*update counts*/
		master->nodes += merge.nodes;
		master->qnodes += merge.qnodes;
		master->time_check += merge.time_check;
		master->splits += merge.splits;
		master->bad_splits += merge.bad_splits;
		master->egbb_probes += merge.egbb_probes;

		l_unlock(master->lock);
		/* 
		* We finished searching one move from the current split. 
		* Check for more moves there and keep on searching.
		* Otherwise remove the node from the split's helper list, 
		* and add it to the list of idle helpers.
		*/
		l_lock(lock_smp);
		SPLIT_MESSAGE& split = global_split[source];
		if(!master->stop_searcher && master->get_cluster_move(&split,true)) {
			l_unlock(lock_smp);
			ISend(source,PROCESSOR::SPLIT,&split,RESPLIT_MESSAGE_SIZE(split));
		} else {
			if(n_hosts > 2)
				ISend(source,CANCEL);
			else
				available_host_workers.push_back(source);
			l_unlock(lock_smp);
			/*remove from current split*/
			l_lock(master->lock);
			master->host_workers.remove(source);
			master->n_host_workers--;
			l_unlock(master->lock);
		}
		/******************************************************************
		* INIT  - Set up position from FEN and prepare threaded search
		******************************************************************/
	} else if(message_id == INIT) {
		INIT_MESSAGE init;
		Recv(source,message_id,&init,sizeof(INIT_MESSAGE));
		
		/*setup board*/
		psb->set_board((char*)init.fen);

		/*make moves*/
		register int i;
		for(i = 0;i < init.pv_length;i++) {
			if(init.pv[i]) psb->do_move(init.pv[i]);	
			else psb->do_null();
		}
#ifdef PARALLEL
		/*wakeup processors*/
		for(i = 0;i < n_processors;i++)
			processors[i]->state = WAIT;
#endif
		/***********************************
		* Distributed transposition table
		************************************/
#if DST_TT_TYPE == 1
	} else if(message_id == RECORD_TT) {
		TT_MESSAGE ttmsg;
		Recv(source,message_id,&ttmsg,sizeof(TT_MESSAGE));
		
		/*record*/
		psb->record_hash(ttmsg.col,ttmsg.hash_key,ttmsg.depth,ttmsg.ply,
					ttmsg.flags,ttmsg.score,ttmsg.move,
					ttmsg.mate_threat,ttmsg.singular);
	} else if(message_id == PROBE_TT) {
		TT_MESSAGE ttmsg;
		Recv(source,message_id,&ttmsg,sizeof(TT_MESSAGE));
		
		/*probe*/
		int proc_id = ttmsg.flags;
		int h_depth,score,mate_threat,singular;
		ttmsg.flags = psb->probe_hash(ttmsg.col,ttmsg.hash_key,ttmsg.depth,ttmsg.ply,
					score,ttmsg.move,ttmsg.alpha,ttmsg.beta,
					mate_threat,singular,h_depth,false);
		ttmsg.depth = h_depth;
		ttmsg.score = (BMP16)score;
		ttmsg.mate_threat = (UBMP8)mate_threat;
		ttmsg.singular = (UBMP8)singular;
		ttmsg.ply = proc_id;  //embed processor_id in message
		/*send*/
		MPI_Request rq;
		ISend(source,PROCESSOR::PROBE_TT_RESULT,&ttmsg,sizeof(TT_MESSAGE),&rq);
		Wait(&rq);
	} else if(message_id == PROBE_TT_RESULT) {
		TT_MESSAGE ttmsg;
		Recv(source,message_id,&ttmsg,sizeof(TT_MESSAGE));
		
		/*copy tt entry to processor*/
		int proc_id = ttmsg.ply;
		PPROCESSOR proc = processors[proc_id];
		proc->ttmsg = ttmsg;
		proc->ttmsg_recieved = true;
#endif
		/******************************************
		* Handle notification (zero-size) messages
		*******************************************/
	} else {
		Recv(source,message_id);
		
		if(message_id == HELP) {
			l_lock(lock_smp);
			if(n_idle_processors == n_processors)
				ISend(source,CANCEL);
			else
				available_host_workers.push_back(source);
			l_unlock(lock_smp);
		} else if(message_id == CANCEL) {
			help_messages--;
		} else if(message_id == QUIT) {
			SEARCHER::abort_search = 1;
		} else if(message_id == GOROOT) {
			message_available = 0;
			SEARCHER::chess_clock.infinite_mode = true;
			int save = processors[0]->state;
			processors[0]->state = GO;
			psb->find_best();
			processors[0]->state = save;
		} else if(message_id == PING) {
			ISend(source,PONG);
		}
	}
}
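The unlock/relock around handle_fail_high in the MERGE branch above is the standard way to invoke a callback that may itself take the master's lock. A minimal sketch of that pattern with std::mutex, using hypothetical names in place of the engine's types:

#include <mutex>

std::mutex g_masterLock;   // hypothetical stand-ins
int        g_bestScore = 0;

void handle_fail_high() { /* may lock g_masterLock internally */ }

void merge_result( int a_score )
{
	std::unique_lock< std::mutex > l_lock( g_masterLock );
	if ( a_score > g_bestScore )
	{
		g_bestScore = a_score;
		// Release before the callback so a nested lock attempt inside
		// handle_fail_high cannot deadlock, then re-acquire afterwards.
		l_lock.unlock();
		handle_fail_high();
		l_lock.lock();
	}
}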