Example #1
void QuickVerifyTask::Detect( const DM::PORT * port )
{
	assert( port != nullptr );
	ExecuteTask( port->number , 0 , 0 );

	emit finish_detect();
}
Example #2
File: mos.cpp Project: dervish77/adgf
/*  mosSequenceTaskListOnce		- execute all tasks once
 *
 *  Parameters:
 *	none
 *
 *  Returns:
 *	none
 */
void mosSequenceTaskListOnce(void)
{
    int index;

    for (index = 0; index < gbContext->numtasks; index++)
    {
        // execute task if it is valid
        if (gbContext->tasklist[index].valid)
        {
            if (gbContext->tasklist[index].blocked)
            {
                if (gbContext->verbose) mosPrint("... taskid %d is blocked ...\n", gbContext->tasklist[index].taskid);
                gbContext->tasklist[index].ready = TRUE;
                gbContext->tasklist[index].blocked = FALSE;
            }
            else if (gbContext->tasklist[index].suspend)
            {
                if (gbContext->verbose) mosPrint("... taskid %d is suspended ...\n", gbContext->tasklist[index].taskid);
                gbContext->tasklist[index].ready = TRUE;
                gbContext->tasklist[index].suspend = FALSE;
            }
            else if (gbContext->tasklist[index].ready)
            {
                if (gbContext->verbose) mosPrint("... taskid %d - ", gbContext->tasklist[index].taskid);
                ExecuteTask(index);
            }
        }
     
        hwCheckTaskTimer(gbContext->ticktime);
    }
}
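The loop above only touches a handful of fields on each entry: blocked and suspended tasks are promoted back to ready on one pass, and ready tasks are actually run. A minimal sketch of the task record and context this loop implies; the field names are taken from the snippet, while the types and capacity are assumptions:

/* Hypothetical reconstruction of the bookkeeping used by
 * mosSequenceTaskListOnce; only the field names come from the snippet. */
typedef struct {
    int taskid;    /* identifier printed in the verbose traces */
    int valid;     /* entry is in use */
    int blocked;   /* promoted back to ready on the next pass */
    int suspend;   /* likewise promoted back to ready on the next pass */
    int ready;     /* eligible for ExecuteTask */
} MosTask;

typedef struct {
    int numtasks;
    MosTask tasklist[32]; /* capacity is an arbitrary assumption */
    int verbose;
    int ticktime;
} MosContext;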
Example #3
/*
 *  Main function
 */
void main(void) 
{
	TRISB = 0x00;            //configuring all portb bits as outputs
    PORTB = 0;               //force 0 state to all outputs
    
    TRISA = 0xFF;            //configuring all porta bits as inputs
    
    //init displays' values
    SetValuesDisplays(0,0,0);
    
    //init time counter
    InitTimeCounter();
    
    //init tasks and config Timer0
    InitTasks();   
	ConfigTimer0();
    
	//main loop 
    while(1)                      
    {
        //Verification: check if there's a task to be executed
        if ((Timer0IntGeneraed == YES)  && (NUMBER_OF_TASKS)) 
        {
            Timer0IntGeneraed = NO;  
            ExecuteTask();			
        }
    }
}
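The loop above is the standard flag-polling pattern on a PIC: the Timer0 interrupt (not shown in the snippet) sets Timer0IntGeneraed, and the main loop clears the flag and dispatches one task per tick. As a rough, portable C++ analogue of the same pattern, with all names illustrative rather than taken from the snippet:

#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> g_tickPending{false}; // plays the role of Timer0IntGeneraed

void ExecuteTask() { /* dispatch the next due task, as in the snippet */ }

void TimerThread() // stands in for the Timer0 interrupt
{
    while (true) {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        g_tickPending.store(true, std::memory_order_release);
    }
}

int main()
{
    std::thread timer(TimerThread);
    while (true) {                            // main loop
        if (g_tickPending.exchange(false)) {  // test and clear the tick flag
            ExecuteTask();
        }
    }
}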
Example #4
EBTNodeResult::Type UBTTaskNode::ExecuteTask(UBehaviorTreeComponent* OwnerComp, uint8* NodeMemory)
{
	if (OwnerComp)
	{
		return ExecuteTask(*OwnerComp, NodeMemory);
	}
	return EBTNodeResult::Failed;
}
Example #5
	void operator()()
	{
		while(true){
			TaskInfoPtr info = TaskInfoPtr();
			task_queue.pop(info);
			if (info)
				ExecuteTask(info);
		}
	}
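The worker functor above relies on task_queue.pop blocking until an item arrives. A minimal sketch of such a queue, assuming TaskInfoPtr is a shared pointer; this condition-variable implementation is an illustration, not the original class:

#include <condition_variable>
#include <deque>
#include <memory>
#include <mutex>

struct TaskInfo;
using TaskInfoPtr = std::shared_ptr<TaskInfo>;

class TaskQueue {
public:
    void push(TaskInfoPtr info)
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push_back(std::move(info));
        }
        cv_.notify_one();
    }

    // Blocks until an item is available, then moves it into 'info'.
    void pop(TaskInfoPtr& info)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        info = std::move(queue_.front());
        queue_.pop_front();
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::deque<TaskInfoPtr> queue_;
};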
Example #6
File: Tasks.cpp Project: Manuzor/LispCpp
void ezTaskSystem::WaitForTask(ezTask* pTask)
{
  if (pTask->IsTaskFinished())
    return;

  EZ_PROFILE(s_ProfileWaitForTask);

  const bool bIsMainThread = ezThreadUtils::IsMainThread();

  ezTaskPriority::Enum FirstPriority = ezTaskPriority::EarlyThisFrame;
  ezTaskPriority::Enum LastPriority = ezTaskPriority::LateNextFrame;

  if (bIsMainThread)
  {
    // if this is the main thread, we need to execute the main-thread tasks
    // otherwise a dependency that pTask is waiting on might not get fulfilled
    FirstPriority = ezTaskPriority::ThisFrameMainThread;
    LastPriority = ezTaskPriority::SomeFrameMainThread;
  }

  while (!pTask->IsTaskFinished())
  {
    // we only execute short tasks here, because you should never WAIT for a long running task
    // and a short task should never have a dependency on a long running task either
    // so we assume that only short running tasks need to be executed to fulfill the task's dependencies
    // Since there are threads to deal with long running tasks in parallel, even if we were waiting for such
    // a task, it will get finished at some point
    if (!ExecuteTask(FirstPriority, LastPriority, pTask))
    {
      // if there was nothing for us to do, that probably means that the task is either currently being processed by some other thread
      // or it is in a priority list that we did not want to work on (maybe because we are on the main thread)
      // in this case try it again with non-main-thread tasks

      if (!pTask->IsTaskFinished() && !ExecuteTask(ezTaskPriority::EarlyThisFrame, ezTaskPriority::LateNextFrame, pTask))
      {
        // if there is STILL nothing for us to do, it might be a long running task OR it is already being processed
        // we won't fall back to processing long running tasks, because that might stall the application
        // instead we assume the task (or any dependency) is currently processed by another thread
        // and to prevent a busy loop, we just give up our time-slice and try again later
        ezThreadUtils::YieldTimeSlice();
      }
    }
  }
}
Example #7
void Thread::Execute(){
	while(!END()){
		if(DONE()){
			sleep();
		}else{
			ExecuteTask();
		}
	}
	terminate=true;
}
Example #8
WelsErrorType CWelsSliceEncodingTask::Execute() {
  WelsThreadSetName ("OpenH264Enc_CWelsSliceEncodingTask_Execute");

  int32_t iReturn = InitTask();
  WELS_VERIFY_RETURN_IFNEQ (iReturn, ENC_RETURN_SUCCESS)

  iReturn = ExecuteTask();

  FinishTask();
  return ENC_RETURN_SUCCESS;
}
Example #9
WelsErrorType CWelsSliceEncodingTask::Execute() {
  WelsThreadSetName ("OpenH264Enc_CWelsSliceEncodingTask_Execute");

  m_eTaskResult = InitTask();
  WELS_VERIFY_RETURN_IFNEQ (m_eTaskResult, ENC_RETURN_SUCCESS)

  m_eTaskResult = ExecuteTask();

  FinishTask();
  return m_eTaskResult;
}
Example #10
WelsErrorType CWelsSliceEncodingTask::Execute() {
  //fprintf(stdout, "OpenH264Enc_CWelsSliceEncodingTask_Execute, %x, sink=%x\n", this, m_pSink);

  m_eTaskResult = InitTask();
  WELS_VERIFY_RETURN_IFNEQ (m_eTaskResult, ENC_RETURN_SUCCESS)

  m_eTaskResult = ExecuteTask();

  FinishTask();

  //fprintf(stdout, "OpenH264Enc_CWelsSliceEncodingTask_Execute Ends\n");
  return m_eTaskResult;
}
Example #11
File: engine.cpp Project: Twiebs/venom
void FinalizeEngineTasks(Engine *engine) {
#ifdef VENOM_SINGLE_THREADED
  for (size_t i = 0; i < engine->tasksToExecute.count; i++) {
    if (ExecuteTask(&engine->workers[0], engine->tasksToExecute[i])) {
      engine->tasksToFinalize.PushBack(engine->tasksToExecute[i]);
    }
  }
  engine->tasksToExecute.count = 0;
#endif//VENOM_SINGLE_THREADED

  for (size_t i = 0; i < engine->tasksToFinalize.count; i++)
    FinalizeTask(&engine->workers[0], engine->tasksToFinalize[i]);
  engine->tasksToFinalize.count = 0;
}
Example #12
void UHTNPlannerComponent::ProcessPendingExecution()
{
	//GEngine->AddOnScreenDebugMessage(-1, 1.5f, FColor::Yellow, TEXT("UHTNPlannerComponent::ProcessPendingExecution()"));

	// can't continue if current task is still aborting
	if(bWaitingForAbortingTasks || !PendingExecution.IsValid())
	{
		//GEngine->AddOnScreenDebugMessage(-1, 1.5f, FColor::Yellow, TEXT("Returning in UHTNPlannerComponent::ProcessPendingExecution()"));
		if(!PendingExecution.IsValid())
		{
			bIsRunning = false;
		}

		return;
	}

	ExecuteTask(PendingExecution);
}
Example #13
void QuickVerifyTask::VerifyBlock( const DM::PORT * port, qlonglong offset , qlonglong sector_count )
{
	assert( port != nullptr );
	auto disk =  disk_master_->getDiskDevice( port );
	assert ( disk != nullptr );
	assert( (qlonglong)( offset + sector_count ) <= disk->getSize() ); // the requested range must fit on the disk

	currentSector_ = offset;
	DWORD block_size = MaxSectorsCount;
	qlonglong prev_sector = currentSector_;

	while( currentSector_ < sector_count )
	{
		if ( task_break_ )
			break;
		if( (qlonglong) (currentSector_ + block_size ) > sector_count )
		{
			block_size = (DWORD) (sector_count - currentSector_);
		}

		qDebug() << "QuickVefifyTask is starting...(" << currentSector_ << ")";
		currentSector_ ;
		bExecuting_ = true;
		if ( ExecuteTask( port->number , currentSector_ , block_size ) )
		{
			/*sectors += block_size;*/
			if ( block_size < MaxSectorsCount )
				block_size *= 2;
		}
		else
		{
			block_size = MinSectorsCount;
			//sectors = currentSector_;
		}
		bExecuting_ = false;

	}
	if ( task_break_ )
		emit break_task( currentSector_ );
	else
		emit finish_task( this->getLastSector() );
}
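The interesting part of VerifyBlock is the adaptive block size: each successful read doubles the block (up to MaxSectorsCount) so healthy regions are verified quickly, while a failure drops back to MinSectorsCount to step carefully around bad sectors. A condensed, self-contained sketch of just that policy, with illustrative constants:

#include <algorithm>
#include <cstdint>

using DWORD = std::uint32_t;               // as on Windows
constexpr DWORD MinSectorsCount = 1;       // illustrative values; the snippet's
constexpr DWORD MaxSectorsCount = 256;     // real constants are not shown

// Double the block size after a successful read (clamped to the maximum),
// and fall back to the minimum after a failure, as VerifyBlock does above.
DWORD NextBlockSize(DWORD current, bool lastReadOk)
{
    if (!lastReadOk)
        return MinSectorsCount;
    return std::min<DWORD>(current * 2, MaxSectorsCount);
}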
Example #14
File: engine.cpp Project: Twiebs/venom
static void WorkerThreadProc(Worker *worker) {
#ifndef VENOM_SINGLE_THREADED
  g_threadID = worker->workerID;
  LogDebug("Worker %u has started", worker->workerID);
  Engine *engine = &g_engine;
  while (engine->isEngineRunning) {
    engine->workLock.lock();
    if (engine->tasksToExecute.count > 0) {
      Task task = engine->tasksToExecute[engine->tasksToExecute.count - 1];
      engine->tasksToExecute.count -= 1;
      engine->workLock.unlock();
      ExecuteTask(worker, task);
      engine->workLock.lock();
      engine->tasksToFinalize.PushBack(task);
      engine->workLock.unlock();
    } else {
      engine->workLock.unlock();
    }
  }

  LogDebug("Worker %u has exited", worker->workerID);
#endif//VENOM_SINGLE_THREADED
}
Example #15
/*
 *  Main function
 */
void main(void) 
{
	/*
     * example of input and output configuration. It can be changed according to what you need
     */
    TRISB = 0x00;            //configures all bits of PORTB as outputs
    PORTB = 0;               //sets all of PORTB's outputs to 0
    TRISA = 0xFF;            //configures all bits of PORTA as inputs
    
    //init tasks and config Timer0
    InitTasks();   
	ConfigTimer0();
    
	//main loop 
    while(1)                      
    {
        //Verification: check if there's a task to be executed
        if ((Timer0IntGeneraed == YES)  && (NUMBER_OF_TASKS)) 
        {
            Timer0IntGeneraed = NO;  
            ExecuteTask();			
        }
    }
}
Example #16
void Scheduler::CheckTasks()
{
	m_mutexTaskList.Lock();

	time_t tCurrent = time(NULL);
	struct tm tmCurrent;
	localtime_r(&tCurrent, &tmCurrent);

	struct tm tmLastCheck;

	if (m_bDetectClockChanges)
	{
		// Detect large step changes of system time 
		time_t tDiff = tCurrent - m_tLastCheck;
		if (tDiff > 60*90 || tDiff < -60*90)
		{
			debug("Reset scheduled tasks (detected clock adjustment greater than 90 minutes)");
			m_bExecuteProcess = false;
			m_tLastCheck = tCurrent;

			for (TaskList::iterator it = m_TaskList.begin(); it != m_TaskList.end(); it++)
			{
				Task* pTask = *it;
				pTask->m_tLastExecuted = 0;
			}
		}
	}

	localtime_r(&m_tLastCheck, &tmLastCheck);

	struct tm tmLoop;
	memcpy(&tmLoop, &tmLastCheck, sizeof(tmLastCheck));
	tmLoop.tm_hour = tmCurrent.tm_hour;
	tmLoop.tm_min = tmCurrent.tm_min;
	tmLoop.tm_sec = tmCurrent.tm_sec;
	time_t tLoop = mktime(&tmLoop);

	while (tLoop <= tCurrent)
	{
		for (TaskList::iterator it = m_TaskList.begin(); it != m_TaskList.end(); it++)
		{
			Task* pTask = *it;
			if (pTask->m_tLastExecuted != tLoop)
			{
				struct tm tmAppoint;
				memcpy(&tmAppoint, &tmLoop, sizeof(tmLoop));
				tmAppoint.tm_hour = pTask->m_iHours;
				tmAppoint.tm_min = pTask->m_iMinutes;
				tmAppoint.tm_sec = 0;

				time_t tAppoint = mktime(&tmAppoint);
				int iWeekDay = tmAppoint.tm_wday;
				if (iWeekDay == 0)
				{
					iWeekDay = 7;
				}

				bool bWeekDayOK = pTask->m_iWeekDaysBits == 0 || (pTask->m_iWeekDaysBits & (1 << (iWeekDay - 1)));
				bool bDoTask = bWeekDayOK && m_tLastCheck < tAppoint && tAppoint <= tCurrent;

				//debug("TEMP: 1) m_tLastCheck=%i, tCurrent=%i, tLoop=%i, tAppoint=%i, bWeekDayOK=%i, bDoTask=%i", m_tLastCheck, tCurrent, tLoop, tAppoint, (int)bWeekDayOK, (int)bDoTask);

				if (bDoTask)
				{
					ExecuteTask(pTask);
					pTask->m_tLastExecuted = tLoop;
				}
			}
		}
		tLoop += 60*60*24; // inc day
		localtime_r(&tLoop, &tmLoop);
	}

	m_tLastCheck = tCurrent;

	m_mutexTaskList.Unlock();
}
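Both scheduler variants in this listing encode the allowed weekdays as a bitmask with Monday in bit 0 through Sunday in bit 6, which is why tm_wday (where 0 means Sunday) is remapped to 7 before the test. A standalone illustration of that check:

#include <cstdio>

// True if 'weekDaysBits' permits the given tm_wday (0 = Sunday .. 6 = Saturday).
// A zero mask means "every day", matching the schedulers above.
bool WeekDayAllowed(int weekDaysBits, int tmWday)
{
    int weekDay = (tmWday == 0) ? 7 : tmWday;  // remap Sunday from 0 to 7
    return weekDaysBits == 0 || (weekDaysBits & (1 << (weekDay - 1)));
}

int main()
{
    const int weekendOnly = (1 << 5) | (1 << 6); // Saturday (bit 5), Sunday (bit 6)
    std::printf("Sunday allowed: %d\n", WeekDayAllowed(weekendOnly, 0));  // 1
    std::printf("Monday allowed: %d\n", WeekDayAllowed(weekendOnly, 1));  // 0
}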
Example #17
//*=================================================================================
//* Prototype: void TSmartServer::Run()
//* Function:  runs the server's services
//* Params:    none
//* Returns:   none
//* Notes:     front-end data-collection server class
//*=================================================================================
void TSmartServer::Run()
{
	bool bReloadFlag = false;
	try
	{
		m_OutThread.Start();
		m_SmartMonitor.Start();

		//If the system's device profile data has not been loaded, keep sending profile-fetch tasks to 金仕达.
		while(1 )
		{
			if( GetDocCount() <= 0 )
			{
				ReportLog("没有设备档案信息! 服务器自动重新装入设备档案表!");
				InitSmartDocList();
				Sleep(4000);				
			}
			else
			{
				break;
			}
		}

		bool  bRet=false;
		long  nNoTaskTick=0,nTick=0,nCollDataTick=0;
		
		m_CollDataTaskObj.nTaskPlanID  = 100 ;
		strcpy(m_CollDataTaskObj.szTaskName, "Collect Data");
		m_CollDataTaskObj.nTaskCycle   = 1 ;
		GetCurDateTime(m_CollDataTaskObj.szBeginTime);
		m_CollDataTaskObj.nRepeatTime  = 0 ;                 //how long the task runs (minutes)
		m_CollDataTaskObj.nRepeatTimes = 0;                //repeat count

		int  k = 0;
		long i = 0 ;
		for(i=0; i< m_DocList.GetCount(); i++)
		{
			TSmartDocObj *pObj = (TSmartDocObj*)m_DocList[i];
			if(!pObj->m_nParentID)
			{
				m_CollDataTaskObj.pTask[k].nTaskID     = i + 100 ;
				m_CollDataTaskObj.pTask[k].nTaskPlanID = 01;
				m_CollDataTaskObj.pTask[k].nAuthID     = pObj->m_nAuthID ;
				strcpy(m_CollDataTaskObj.pTask[k].szTaskCode, "01");
				strcpy(m_CollDataTaskObj.pTask[k].szName, "Real-time Collection");
				strcpy(m_CollDataTaskObj.pTask[k].szType, "01");
				m_CollDataTaskObj.pTask[k].nPriority = 0 ;
				m_CollDataTaskObj.nTask++;  //total number of devices
				k++;
			}
		}
		m_CollDataTaskObj.SetBeginTime();
		ExecuteTask(&m_CollDataTaskObj);
		//execute the tasks
		static TSmartTaskObj curObj;
		BOOL bNoTask=FALSE;
		int ret=-1;
		nCollDataTick = GetTickCount();
		while( !IsShutdown() )
		{	
				bRet = false;
				//If the back end has no tasks to process, run the data-collection task instead
				while(bNoTask)
				{
					ExecuteCollDataTask();
					nTick = GetTickCount();
					nCollDataTick = nTick;
					//If data collection has run for more than 2 seconds, break out and request new tasks from the back end
					if(nTick-nNoTaskTick>=m_nNoTask_Tick)//2 seconds
					{
						break;
					}
					Sleep(1000);
				}
				nTick=GetTickCount();
				//If more than 5 seconds have passed since the last data collection, launch another collection task
				if(nTick-nCollDataTick>=m_nCollData_Tick)
				{
					nCollDataTick = nTick;
					ExecuteCollDataTask();
				}
				//send a heartbeat request packet
				ret=m_Channels.SendTick();
				if(ret!=RET_OK)
				{
					continue;
				}
				ZeroMemory(&curObj, sizeof(curObj));
				curObj.Clear();
				ret=m_Channels.GetSmartTaskPlan(&curObj);
				switch(ret)
				{	
					case RET_OK:
						ReportLog("选中任务: %s(%d), 执行次数:%d, 执行时长:%d, 设备数:%d.\n",curObj.szTaskName, curObj.nTaskPlanID,curObj.nRepeatTimes, curObj.nRepeatTime, curObj.nTask); 
						curObj.SetBeginTime();
						bRet = ExecuteTask(&curObj);
						if(!bRet)
						{
							for(int j=0; j< curObj.nTask; j++)
							{
								TSResultData  data;
								ZeroMemory(&data, sizeof(data));
								strcpy(data.sMsg, "Task execution failed [the terminal may be busy]!");
								m_Channels.ReportTaskResult(&curObj.pTask[j], RET_TERME_NOANSWER, &data);
							}
						}
						bNoTask=FALSE;
						break;
					case RET_NOTASK:
						ReportLog("金仕达没有任务下达"); 
						nNoTaskTick = GetTickCount();
						bNoTask=TRUE;
						break;
					default:
						ReportLog("向金仕达请求任务失败"); 
						bNoTask=FALSE;
						break;
				}
			/*
			if(!GetMemoryInfo())
			{
				ReportError("Failed to get system memory info; please close the front-end program and restart!");
			}
			printf("TotalMemory %dK,UseMemory %dK,FreeMemory %dK\n",nTotalMemory,nUseMemory,nFreeMemory);
			if(nFreeMemory/1024<10)
			{
				ReportError("Less than 10M of free memory; please close some other unneeded programs");
			}
			*/
		}
		m_OutThread.Shutdown();
	}
	catch(TException& e)
	{
		ReportError(e.GetText());
		printf("------ e error ----\n");
		Shutdown();
	}
	catch(...)
	{
		ReportError("未知的错误导致服务器终止!");
		Shutdown();
	}

	PostQuitMessage(0);
}
Example #18
void ezTaskSystem::WaitForTask(ezTask* pTask)
{
  if (pTask->IsTaskFinished())
    return;

  EZ_PROFILE_SCOPE("WaitForTask");

  const bool bIsMainThread = ezThreadUtils::IsMainThread();
  const bool bIsLoadingThread = IsLoadingThread();

  ezTaskPriority::Enum FirstPriority = ezTaskPriority::EarlyThisFrame;
  ezTaskPriority::Enum LastPriority = ezTaskPriority::LateNextFrame;

  // this specifies whether WaitForTask may fall back to processing standard tasks, when there is no more specific work available
  // in some cases we absolutely want to avoid that, since it can produce deadlocks
  // E.g. on the loading thread, if we are in the process of loading something and then we have to wait for something else,
  // we must not start that work on the loading thread, because once THAT task runs into something where it has to wait for something
  // to be loaded, we have a circular dependency on the thread itself and thus a deadlock
  bool bAllowDefaultWork = true;

  if (bIsMainThread)
  {
    // if this is the main thread, we need to execute the main-thread tasks
    // otherwise a dependency that pTask is waiting on might not get fulfilled
    FirstPriority = ezTaskPriority::ThisFrameMainThread;
    LastPriority = ezTaskPriority::SomeFrameMainThread;

    /// \todo It is currently unclear whether bAllowDefaultWork should be false here as well (in which case the whole fall back mechanism
    /// could be removed)
    bAllowDefaultWork = false;
  }
  else if (bIsLoadingThread)
  {
    FirstPriority = ezTaskPriority::FileAccessHighPriority;
    LastPriority = ezTaskPriority::FileAccess;
    bAllowDefaultWork = false;
  }

  while (!pTask->IsTaskFinished())
  {
    // we only execute short tasks here, because you should never WAIT for a long running task
    // and a short task should never have a dependency on a long running task either
    // so we assume that only short running tasks need to be executed to fulfill the task's dependencies
    // Since there are threads to deal with long running tasks in parallel, even if we were waiting for such
    // a task, it will get finished at some point
    if (!ExecuteTask(FirstPriority, LastPriority, pTask))
    {
      if (!pTask->IsTaskFinished())
      {
        // if there was nothing for us to do, that probably means that the task is either currently being processed by some other thread
        // or it is in a priority list that we did not want to work on (maybe because we are on the main thread)
        // in this case try it again with non-main-thread tasks

        // if bAllowDefaultWork is false, we just always yield here

        if (!bAllowDefaultWork || !ExecuteTask(ezTaskPriority::EarlyThisFrame, ezTaskPriority::LateNextFrame, pTask))
        {
          // if there is STILL nothing for us to do, it might be a long running task OR it is already being processed
          // we won't fall back to processing long running tasks, because that might stall the application
          // instead we assume the task (or any dependency) is currently processed by another thread
          // and to prevent a busy loop, we just give up our time-slice and try again later
          ezThreadUtils::YieldTimeSlice();
        }
      }
    }
  }
}
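Stripped of the engine specifics, WaitForTask is a "help or yield" wait loop: while the target task is unfinished, try to execute eligible pending work yourself, and only give up the time slice when nothing runnable is found. A generic sketch of that shape; the names are illustrative and not part of the ez API:

#include <atomic>
#include <thread>

// Stand-in for ExecuteTask(FirstPriority, LastPriority, pTask) above; a real
// scheduler hook would try to pop and run one eligible short task.
bool TryExecutePendingTask()
{
    return false; // stub: report that no runnable work was found
}

// Generic "help or yield" wait: do useful work while waiting, and yield the
// time slice instead of busy-spinning when nothing is runnable.
void WaitUntilFinished(const std::atomic<bool>& finished)
{
    while (!finished.load(std::memory_order_acquire)) {
        if (!TryExecutePendingTask()) {
            std::this_thread::yield();
        }
    }
}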
Example #19
void Scheduler::CheckTasks()
{
	PrepareLog();

	{
		Guard guard(m_taskListMutex);

		time_t current = Util::CurrentTime();

		if (!m_taskList.empty())
		{
			// Detect large step changes of system time
			time_t diff = current - m_lastCheck;
			if (diff > 60 * 90 || diff < 0)
			{
				debug("Reset scheduled tasks (detected clock change greater than 90 minutes or negative)");

				// check all tasks for the last week
				m_lastCheck = current - 60 * 60 * 24 * 7;
				m_executeProcess = false;

				for (Task* task : &m_taskList)
				{
					task->m_lastExecuted = 0;
				}
			}

			time_t localCurrent = current + g_Options->GetLocalTimeOffset();
			time_t localLastCheck = m_lastCheck + g_Options->GetLocalTimeOffset();

			tm tmCurrent;
			gmtime_r(&localCurrent, &tmCurrent);
			tm tmLastCheck;
			gmtime_r(&localLastCheck, &tmLastCheck);

			tm tmLoop;
			memcpy(&tmLoop, &tmLastCheck, sizeof(tmLastCheck));
			tmLoop.tm_hour = tmCurrent.tm_hour;
			tmLoop.tm_min = tmCurrent.tm_min;
			tmLoop.tm_sec = tmCurrent.tm_sec;
			time_t loop = Util::Timegm(&tmLoop);

			while (loop <= localCurrent)
			{
				for (Task* task : &m_taskList)
				{
					if (task->m_lastExecuted != loop)
					{
						tm tmAppoint;
						memcpy(&tmAppoint, &tmLoop, sizeof(tmLoop));
						tmAppoint.tm_hour = task->m_hours;
						tmAppoint.tm_min = task->m_minutes;
						tmAppoint.tm_sec = 0;

						time_t appoint = Util::Timegm(&tmAppoint);

						int weekDay = tmAppoint.tm_wday;
						if (weekDay == 0)
						{
							weekDay = 7;
						}

						bool weekDayOK = task->m_weekDaysBits == 0 || (task->m_weekDaysBits & (1 << (weekDay - 1)));
						bool doTask = weekDayOK && localLastCheck < appoint && appoint <= localCurrent;

						//debug("TEMP: 1) m_tLastCheck=%i, tLocalCurrent=%i, tLoop=%i, tAppoint=%i, bWeekDayOK=%i, bDoTask=%i", m_tLastCheck, tLocalCurrent, tLoop, tAppoint, (int)bWeekDayOK, (int)bDoTask);

						if (doTask)
						{
							ExecuteTask(task);
							task->m_lastExecuted = loop;
						}
					}
				}
				loop += 60 * 60 * 24; // inc day
				gmtime_r(&loop, &tmLoop);
			}
		}

		m_lastCheck = current;
	}

	PrintLog();
}