Example #1
/// Worker routine.
/// This is the routine every worker thread runs.
/// The worker thread continuously asks for Tasks and executes them.
///
/// Note that worker threads are NOT guaranteed to ever call Worker.exit()
/// before the program ends.
void workerLoop ( Worker * me, void* args ) {
  task_worker_args* wargs = (task_worker_args*) args;
  TaskManager* tasks = wargs->tasks;
  TaskingScheduler * sched = wargs->scheduler; 

  sched->onWorkerStart();

  StateTimer::setThreadState( StateTimer::FINDWORK );
  StateTimer::enterState_findwork();

  Task nextTask;

  while ( true ) {
    // block until receive work or termination reached
    if (!tasks->getWork(&nextTask)) break; // quitting time

    sched->num_active_tasks++;
    StateTimer::setThreadState( StateTimer::USER );
    StateTimer::enterState_user();
    {
      GRAPPA_PROFILE( exectimer, "user_execution", "", GRAPPA_USER_GROUP );
      nextTask.execute();
    }
    StateTimer::setThreadState( StateTimer::FINDWORK );
    sched->num_active_tasks--;

    sched->thread_yield( ); // yield to the scheduler
  }
}
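The doc comment above says workers block in getWork() until they either receive a task or termination is reached. Grappa's TaskManager internals are not shown here; the following is only a minimal sketch of that blocking contract using a mutex and condition variable, with all names invented for illustration.

#include <condition_variable>
#include <deque>
#include <mutex>

struct Task { void execute() {} };          // stand-in for the real Task type

// Hypothetical queue sketch; not Grappa's TaskManager.
class BlockingTaskQueue {
public:
    // Blocks until a task is available or shutdown() has been called.
    // Returns false at "quitting time", mirroring getWork() above.
    bool getWork(Task* out) {
        std::unique_lock<std::mutex> lock(m_);
        cv_.wait(lock, [this] { return shuttingDown_ || !queue_.empty(); });
        if (queue_.empty()) return false;   // shut down and nothing left to run
        *out = queue_.front();
        queue_.pop_front();
        return true;
    }

    void put(const Task& t) {
        { std::lock_guard<std::mutex> lock(m_); queue_.push_back(t); }
        cv_.notify_one();
    }

    void shutdown() {
        { std::lock_guard<std::mutex> lock(m_); shuttingDown_ = true; }
        cv_.notify_all();
    }

private:
    std::mutex m_;
    std::condition_variable cv_;
    std::deque<Task> queue_;
    bool shuttingDown_ = false;
};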
Example #2
  void process_tasks(const atomic<int>* counter)
  {
    TaskQueue& queue = g_queues_ptr[get_thread_id()];  // ASSIGNMENT: use per-thread task queue with "get_thread_id()"

    while (counter->get() != 0) {
      PPP_DEBUG_EXPR(queue.size());
       
      Task* task;

      if (queue.size() > 0) {
        //Dequeue from local queue
        task = queue.dequeue();
      } else {
        // ASSIGNMENT: add task stealing
        int value = rand() % get_thread_count();
        TaskQueue& stolenQueue = g_queues_ptr[value];
        task = stolenQueue.steal();
      }

      if (task != NULL) {
        task->execute(); // overloaded method
        task->post_execute(); // cleanup, method of base class
      }
    }
  }
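A detail worth noting in the stealing branch above: rand() % get_thread_count() can select the current thread's own (empty) queue as the victim. A common refinement, sketched here against the same assumed g_queues_ptr, get_thread_id(), get_thread_count(), and steal() helpers from the assignment, is to exclude the local index when picking a victim.

// Sketch only; relies on the assignment's g_queues_ptr, get_thread_id(),
// get_thread_count(), and TaskQueue::steal() exactly as used in Example #2.
static Task* steal_from_random_victim() {
    int self   = get_thread_id();
    int others = get_thread_count() - 1;
    if (others <= 0)
        return NULL;                        // single thread: nothing to steal
    int victim = rand() % others;
    if (victim >= self)
        victim++;                           // skip this thread's own queue
    return g_queues_ptr[victim].steal();    // may still return NULL if empty
}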
Example #3
void
dispatchTask(Task &task,size_t length)
{
    if (WorkerPool::currentPool() && !WorkerPool::currentPool()->inWorkerThread())
        WorkerPool::currentPool()->dispatch(task,length);
    else
        task.execute(0,length,0);
}
Example #4
    void ThreadPool::executeOneTaskIfPossible_CRITICAL_SECTION()
    {
      if ( m_tasks.empty() && m_idleTasks.empty() && (!m_isMainThread.get() || m_mainThreadTasks.empty()) )
        m_stateCond.wait( m_stateMutex );
      else
      {
        std::vector<Task *> *taskQueue;
        if ( m_isMainThread.get() && !m_mainThreadTasks.empty() )
          taskQueue = &m_mainThreadTasks;
        else if ( !m_tasks.empty() )
          taskQueue = &m_tasks;
        else taskQueue = &m_idleTasks;
        
        Task *task = taskQueue->back();
        
        size_t index;
        bool keep;
        task->preExecute_CRITICAL_SECTION( index, keep );
        if ( !keep )
          taskQueue->pop_back();

        m_stateMutex.release();
        try
        {
          task->execute( index );
        }
        catch ( Exception e )
        {
          Util::SimpleString prefixedException = "Exception: " + e.getDesc();
          RC::Handle<LogCollector> logCollector = task->getLogCollector();
          if ( logCollector )
            logCollector->add( prefixedException.data(), prefixedException.length() );
        }
        catch ( ... )
        {
          static Util::SimpleString const genericException = "Exception (unknown)";
          RC::Handle<LogCollector> logCollector = task->getLogCollector();
          if ( logCollector )
            logCollector->add( genericException.data(), genericException.length() );
        }
        m_stateMutex.acquire();
        
        task->postExecute_CRITICAL_SECTION();
        if ( task->completed_CRITICAL_SECTION() )
        {
          void (*finishedCallback)( void * ) = task->getFinishedCallback();
          if ( finishedCallback )
            finishedCallback( task->getFinishedUserdata() );

          task->dispose();
          
          // [pzion 20101108] Must wake waiter because they might be
          // waiting on the task completion
          m_stateCond.broadcast();
        }
      }
    }
Example #5
	void workerThread () {
		while ( !m_done ){
			Task task;
			if ( m_taskQueue.try_pop ( task) ){
				task->execute();
			}else{
				std::this_thread::yield();
			}
		}
	}
Example #6
	void Dispatcher::process(int count) {
		for (int i = 0; i < count || count == 0; i++) {
			if (m_TaskQueue.size() <= 0)
				break;

			Task* t = m_TaskQueue.top();
			t->execute();
			m_TaskQueue.pop();
		}
	}
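The loop condition i < count || count == 0 means a count of zero keeps processing until the queue drains, while a positive count caps the work done per call. A hypothetical call site (the dispatcher variable is illustrative):

	dispatcher.process(8);   // run at most 8 queued tasks, e.g. one per frame
	dispatcher.process(0);   // count == 0: keep running tasks until the queue is empty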
Example #7
void scheduler_execute(Scheduler* pScheduler)
{
	for (;;)
	{
		Task* pTask = taskqueue_dequeue(&pScheduler->taskQueue);
		if (pTask)
		{
			pTask->execute(pTask->pData);
			_free(pTask);
		}
	}
}
Example #8
void BusyLeafs::tryPop(){
	boost::unique_lock<boost::shared_mutex> lock(shr_mut);
	if(tasksPool.size()>0){
		//cout<<"core id: "<<id<<" pop data from list"<<endl;
		Task* t = tasksPool.at(0);
		tasksPool.pop_front();
		lock.unlock();
		//cout<<"core id: "<<id<<" ";
		//t->toString();
		t->execute();
	}
}
Example #9
    virtual void
    execute()
    {
        // For each fetched message:
        Task task;
        while (m_input_queue.popT(task, true))
        {
            task->execute();
            m_output_queue.push(task);
        }

        assert(m_input_queue.is_cancelled());
    }
Example #10
void
WorkerThread::run ()
{
    //
    // Signal that the thread has started executing
    //

    _data->threadSemaphore.post();

    while (true)
    {
        //
        // Wait for a task to become available
        //

        _data->taskSemaphore.wait();

        {
            Lock taskLock (_data->taskMutex);

            //
            // If there is a task pending, pop off the next task in the FIFO
            //

            if (_data->numTasks > 0)
            {
                Task* task = _data->tasks.front();
                TaskGroup* taskGroup = task->group();
                _data->tasks.pop_front();
                _data->numTasks--;

                taskLock.release();
                task->execute();
                taskLock.acquire();

                delete task;
                taskGroup->_data->removeTask();
            }
            else if (_data->stopped())
            {
                break;
            }
        }
    }
}
Example #11
void ThreadPoolThread::run()
{
	Task *task;

	while (1)
	{
		if ( this->threadPool->queue.pop( task, 3 ) )	// 3-second timeout
		{
			task->execute();
			delete task;
		}
		if ( this->needStop )
		{
			this->needStop = false;
			return;
		}
	}
}
Example #12
File: Worker.cpp Project: kri5/zia
/// Here we are in the first threaded method
void            Worker::code()
{
    while (this->_running)
    {
        Task*   t = this->_pool->popTask();
        if (t != NULL)
        {
            t->execute();
            if (t->isFree() == true)
                this->_pool->finishTask(t);
        }
        else
        {
            this->_pool->addSleepingThread(this);
            //Logger::getInstance() << Logger::Info << Logger::PrintStdOut << "Sending thread " << this->m_pid << " to bed" << Logger::Flush;
            this->checkSleep(true);
        }
    }
}
Example #13
// Worker thread
gpointer Thread::non_static_thread_func()
{
    while (true)
    {
        g_mutex_lock(queue_mutex);
        while (task_queue.empty())
        {
            if (thread_cmd != CMD_NONE)
            {
                g_mutex_unlock(queue_mutex);
                goto CleanUp;
            }

            g_cond_wait(queue_cond, queue_mutex);
        }

        if (thread_cmd == CMD_TERMINATE)
        {
            g_mutex_unlock(queue_mutex);
            goto CleanUp;
        }

        // Get task from task queue
        Task *task = task_queue.front();
        task_queue.pop_front();

        g_mutex_unlock(queue_mutex);

        // About executing task, update running_task variable.
        running_task = task;
        task->execute();
        // Task execution done, update running_task variable.
        running_task = 0;

        // Release task object
        delete task;
    }

CleanUp:
    clear_all();
    return 0;
}
Example #14
void Thread::execute() {
	while (!this->exited) {

		if (this->queue.size()) {
			this->isAvailable = false;

			EnterCriticalSection(this->section);
				Task *task = this->queue.front();
				this->queue.pop();
			LeaveCriticalSection(this->section);

			task->execute();
			this->isAvailable = true;
		}
		else {
			Sleep(3000); //check queue after 3 sec
		}

	}
}
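Because Example #14 polls and then sleeps for three seconds when the queue is empty, a newly queued task can sit for up to three seconds before it runs. One way to remove that latency, sketched below with standard C++ primitives instead of the Win32 critical section used above (all names are illustrative), is to wait on a condition variable that is signalled whenever work is enqueued.

#include <condition_variable>
#include <mutex>
#include <queue>

struct Task { void execute() {} };           // stand-in for the project's Task

class WaitingWorker {
public:
    void run() {
        while (true) {
            Task* task = nullptr;
            {
                std::unique_lock<std::mutex> lock(mutex_);
                // Wake as soon as work arrives or exit is requested,
                // instead of re-checking every three seconds.
                cond_.wait(lock, [this] { return exited_ || !queue_.empty(); });
                if (exited_ && queue_.empty())
                    return;
                task = queue_.front();
                queue_.pop();
            }
            task->execute();
            delete task;
        }
    }

    void enqueue(Task* task) {
        { std::lock_guard<std::mutex> lock(mutex_); queue_.push(task); }
        cond_.notify_one();
    }

    void exit() {
        { std::lock_guard<std::mutex> lock(mutex_); exited_ = true; }
        cond_.notify_all();
    }

private:
    std::mutex mutex_;
    std::condition_variable cond_;
    std::queue<Task*> queue_;
    bool exited_ = false;
};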
Example #15
 void HostInputHandler::scheduleTask ( Task task){
     //m_hostTaskQueue.add ( task );
     task->execute();
 }
Example #16
    void *Task::run(void *arg)
    {
        Task *p = reinterpret_cast<Task *>(arg);
        struct sched_param sched_attr;
        int policy;
        pthread_t tid = pthread_self();
        Scheduler &sched = Scheduler::get_instance();

        m_thread_syncpoint.lock();
        m_thread_syncpoint.condition_satisfied();

        /* Set the scheduler parameters. Linux at least fails when the
         * parameters are set before creating the thread. The parameters must
         * be set BY the affected thread. Stupid little penguins!!!
         */
        if(pthread_getschedparam(tid, &policy, &sched_attr) != 0) {
            m_thread_syncpoint.release();
            m_thread_syncpoint.unlock();
            EPRINTF("Failed getting task schedule parameters\n");
            return NULL;
        }

        policy = SCHED_FIFO;
        sched_attr.sched_priority = p->m_props.prio;
        if(pthread_setschedparam(tid, policy, &sched_attr) != 0) {
            m_thread_syncpoint.release();
            m_thread_syncpoint.unlock();
            EPRINTF("Failed setting task schedule parameters\n");
            EPRINTF("Did you remember sudo?\n");
            return NULL;
        }

        /* Make sure the thread is being scheduled at the right priority. Linux
         * requires that the application be run as root or permissions have
         * been set through PAM to allow use of real time thread priority
         * scheduling. Otherwise, the checks below will fail and we'll exit the
         * thread.
         */
        if(pthread_getschedparam(tid, &policy, &sched_attr) != 0) {
            m_thread_syncpoint.release();
            m_thread_syncpoint.unlock();
            EPRINTF("Failed getting task schedule parameters\n");
            return NULL;
        }

        if(SCHED_FIFO != policy) {
            m_thread_syncpoint.release();
            m_thread_syncpoint.unlock();
            EPRINTF("Failed to set real time scheduling policy\n");
            return NULL;
        }

        if(sched_attr.sched_priority != static_cast<int>(p->m_props.prio)) {
            m_thread_syncpoint.release();
            m_thread_syncpoint.unlock();
            EPRINTF("Failed to set real time priority for task %s\n",
                    p->m_name);
            return NULL;
        }

        DPRINTF("Successfully registered task %s at priority %d\n", p->m_name,
                sched_attr.sched_priority);

        units::Nanoseconds ref_time(0);

        p->m_operational = true;
        m_thread_syncpoint.release();
        m_thread_syncpoint.unlock();

        while(true) {
            units::Nanoseconds start_time(0);
            get_time(start_time);

            /* If this is a periodic task, wait to be scheduled.
            */
            if(p->m_impl->rategroup_sync) {
                if(p->m_impl->first_pass) {
                    /* We grab the lock and hold it while we're executing so no
                     * other task jumps in. One task at a time per
                     * rategroup!  The lock gets released once we get into the
                     * wait call.
                     */
                    p->m_impl->rategroup_sync->lock();

                    /* Compute a wake up time to be used on the first pass.
                    */
                    p->m_impl->expected_wake_time = get_reference_time();
                    p->m_impl->expected_wake_time -=
                        p->m_impl->expected_wake_time % p->m_props.period;

                    p->m_impl->first_pass = false;
                }

                /* We use reference time as a post condition for the wait. The
                 * reference time when we wake up should be later than the time
                 * at which we went to sleep. Otherwise, we've experienced a
                 * spurious wakeup.
                 */
                p->m_impl->expected_wake_time += p->m_props.period;
                DPRINTF("%s: expected wake time = %" PRId64 "\n", p->m_name,
                        int64_t(p->m_impl->expected_wake_time));

                while((true == p->m_operational) &&
                      (ref_time = get_reference_time()) <
                          p->m_impl->expected_wake_time) {
                    DPRINTF("%s: Tref = %" PRId64
                            ", expected_wake_time = %" PRId64 "\n",
                            p->m_name, int64_t(ref_time),
                            int64_t(p->m_impl->expected_wake_time));
                    if(false == p->m_impl->rategroup_sync->wait()) {
                        EPRINTF("%s: Error in periodic wait\n", p->m_name);
                        p->m_impl->rategroup_sync->unlock();
                        return NULL;
                    }
                    DPRINTF("%s:Woke up\n", p->m_name);
                }

                DPRINTF("%s: Tref = %" PRId64 ", expected_wake_time = %" PRId64
                        "\n",
                        p->m_name, int64_t(ref_time),
                        int64_t(p->m_impl->expected_wake_time));

                /* Wait for all of the tasks to be awoken and ready to run.
                 * This relies on testing against an inverted wait condition
                 * because we're not going to clear the condition until all the
                 * tasks are awake. The end of frame task is allowed to begin
                 * execution because it is the last task on the list (in
                 * priority order). The end of frame task signals all of the
                 * others that they may proceed.
                 */
                if(false == p->m_is_eof_task) {
                    p->m_impl->rategroup_sync->inverse_wait();
                }
            }

            if(false == p->m_operational) {
                DPRINTF("%s: No longer operational\n", p->m_name);
                if(p->m_impl->rategroup_sync) {
                    p->m_impl->rategroup_sync->unlock();
                }

                return NULL;
            }

            if(p->m_props.is_present_in_schedule(sched.get_schedule()) ||
               (0 == p->m_props.period)) {
                DPRINTF("Executing task %s\n", p->m_name);
                if(false == p->execute()) {
                    DPRINTF("Task %s exiting\n", p->m_name);
                    p->terminate();
                    if(p->m_impl->rategroup_sync) {
                        p->m_impl->rategroup_sync->unlock();
                    }
                    return NULL;
                }
                units::Nanoseconds end_time(0);
                get_time(end_time);

                p->m_props.last_runtime = end_time - start_time;
                if(p->m_props.last_runtime > p->m_props.max_runtime) {
                    p->m_props.max_runtime = p->m_props.last_runtime;
                }
                DPRINTF("%s last_runtime = %" PRId64 ", max_runtime = %" PRId64
                        "\n",
                        p->m_name, int64_t(p->m_props.last_runtime),
                        int64_t(p->m_props.max_runtime));
            }
        }

        return NULL;
    }
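The comments in Example #16 make two points about Linux real-time scheduling: the parameters must be set by the affected thread itself, and SCHED_FIFO requires root or an rtprio limit granted through PAM. Stripped of the project's syncpoint machinery, the core pthread pattern looks roughly like this (a sketch, not the project's code):

#include <pthread.h>
#include <sched.h>

// Called from inside the thread whose priority should be raised.
// Returns 0 on success, -1 on any failure (typically EPERM without
// root or an appropriate rtprio limit).
static int make_self_realtime(int priority)
{
    struct sched_param sp;
    int policy;
    pthread_t self = pthread_self();

    if (pthread_getschedparam(self, &policy, &sp) != 0)
        return -1;

    sp.sched_priority = priority;
    if (pthread_setschedparam(self, SCHED_FIFO, &sp) != 0)
        return -1;

    // Read the values back and verify them, as Example #16 does.
    if (pthread_getschedparam(self, &policy, &sp) != 0 ||
        policy != SCHED_FIFO ||
        sp.sched_priority != priority)
        return -1;

    return 0;
}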