void named_worker_thread::run()
{
    scope_guard notify_exception(boost::bind(&named_worker_thread::died_unexpect,this));
    while(execute_task()) {}
    notify_exception.disable();
    //printf("named thread end in normal way.\n");
}
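The guard here reports an abnormal exit: if execute_task() throws, the stack unwinds past notify_exception and died_unexpect() fires; on a clean shutdown the guard is disabled first. A minimal sketch of such a guard (the actual class used above is not shown, so this is an assumed shape):

#include <functional>

class scope_guard {
public:
    explicit scope_guard(std::function<void()> on_exit)
        : on_exit_(std::move(on_exit)), active_(true) {}
    ~scope_guard() { if (active_) on_exit_(); } // fires on early/exceptional exit
    void disable() { active_ = false; }         // called on the normal path
    scope_guard(const scope_guard&) = delete;
    scope_guard& operator=(const scope_guard&) = delete;
private:
    std::function<void()> on_exit_;
    bool active_;
};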
int main(int argc, char* argv[]) {
	SharedVariable v;

	int runningTimeInSec = 10;
	if (argc == 2) {
		runningTimeInSec = atoi(argv[1]);
	}

	if (wiringPiSetup() == -1) {
		printf("Failed to setup wiringPi.\n");
		return 1;
	}

	printf("start");
	// Initialize for the interfaces provided
	signal(SIGINT, signal_handler);
	init_deferred_buffer(1024*1024); // 1MB
	init_userspace_governor();
	init_workload();
	printf("stop");

	// Initializers that you need to implement
	init_shared_variable(&v);
	init_sensors(&v);
	learn_workloads(&v);

	printf("Init scheduler start");
	// Init scheduler
	int aliveTasks[NUM_TASKS];
	init_scheduler(runningTimeInSec);
	set_by_max_freq(); // reset to the max freq

	printf("Init scheduler stop");
	printDBG("Start Scheduling with %d threads\n", NUM_TASKS);
	TaskSelection sel;
	long long idleTime;
	while (v.bProgramExit != 1) {
		// 1. Prepare tasks
		idleTime = prepare_tasks(aliveTasks, &v);
		if (idleTime < 0)
			break;

		// 2. Select a process: You need to implement.
		sel = select_task(&v, aliveTasks, idleTime); 
		if (sel.task < 0)
			break;

		// 3. Run the selected task
		execute_task(sel);
	}

	finish_workload();
	release_buffer(fileno(stdout));
	printf("Scheduler program finished.\n");

	return 0;
}
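The loop above exits when v.bProgramExit becomes 1, and main() installs signal_handler for SIGINT, so the handler presumably just sets that flag. A minimal sketch, assuming SharedVariable has an int bProgramExit field and a file-scope pointer to it (the pointer name is hypothetical):

#include <signal.h>

static SharedVariable *g_sharedVariable; /* hypothetical: saved in init_shared_variable() */

void signal_handler(int sig)
{
	(void)sig;
	/* Only set the flag; the main loop performs the actual teardown.
	   (A volatile sig_atomic_t flag would be the strictly portable choice.) */
	g_sharedVariable->bProgramExit = 1;
}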
Example no. 3
void TaskManager::wait_task(Task* task)
{
    while (task->m_dependent_tasks)
    {
        auto* new_task = get_task();
        if (new_task)
        {
            execute_task(new_task);
        }
    }
}
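Running other ready tasks while waiting on a dependency keeps the worker productive and avoids deadlock, but the loop spins when no other work exists. A sketch of the same loop with a yield for that idle case (std::this_thread::yield is standard; get_task()/execute_task() are the manager's own helpers):

#include <thread>

void TaskManager::wait_task(Task* task)
{
    while (task->m_dependent_tasks)
    {
        if (auto* new_task = get_task())
            execute_task(new_task);
        else
            std::this_thread::yield(); // nothing runnable: give up the time slice
    }
}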
Example no. 4
void *worker_handler(void *data)
{
	struct mproc_state *mps = (struct mproc_state *) data;
	struct task_desc *task;

	while(!(*(mps->kill_master))) {
		task = dequeue(mps->incoming);
		task = execute_task(task);
		enqueue(task, mps->results);
	}

	return NULL;
}
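This worker only makes sense if dequeue() blocks while mps->incoming is empty; otherwise the loop would spin on NULL tasks. A minimal sketch of such a blocking queue (the real structures are not shown, so the types here are assumptions):

#include <pthread.h>
#include <stdlib.h>

struct node { struct task_desc *task; struct node *next; };

struct queue {
	struct node *head, *tail;
	pthread_mutex_t lock;
	pthread_cond_t nonempty;
};

void enqueue(struct task_desc *task, struct queue *q)
{
	struct node *n = malloc(sizeof *n);
	n->task = task;
	n->next = NULL;
	pthread_mutex_lock(&q->lock);
	if (q->tail) q->tail->next = n; else q->head = n;
	q->tail = n;
	pthread_cond_signal(&q->nonempty);
	pthread_mutex_unlock(&q->lock);
}

struct task_desc *dequeue(struct queue *q)
{
	struct task_desc *task;
	struct node *n;

	pthread_mutex_lock(&q->lock);
	while (!q->head)                      /* sleep until a producer signals */
		pthread_cond_wait(&q->nonempty, &q->lock);
	n = q->head;
	q->head = n->next;
	if (!q->head) q->tail = NULL;
	pthread_mutex_unlock(&q->lock);
	task = n->task;
	free(n);
	return task;
}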
Example no. 5
static hclib_task_t *find_and_run_task(hclib_worker_state *ws,
        const int on_fresh_ctx, volatile int *flag, const int flag_val,
        finish_t *current_finish) {
    hclib_task_t *stolen[STEAL_CHUNK_SIZE];
    hclib_task_t *task = locale_pop_task(ws);

    if (!task) {
        while (*flag != flag_val) {
            // try to steal
            // task = locale_steal_task(ws);
            int victim;
            const int nstolen = locale_steal_task(ws, (void **)stolen, &victim);
            if (nstolen) {
#ifdef HCLIB_STATS
                worker_stats[ws->id].count_steals++;
                worker_stats[ws->id].stolen_tasks += nstolen;
                worker_stats[ws->id].stolen_tasks_per_thread[victim] += nstolen;
#endif
                task = stolen[0];
                for (int i = 1; i < nstolen; i++) {
                    rt_schedule_async(stolen[i], ws);
                }
                break;
            }
        }
    }

    if (task == NULL) {
        return NULL;
    } else if (task && (on_fresh_ctx || task->non_blocking ||
                (task->current_finish && task->current_finish == current_finish))) {
        /*
         * If the retrieved task is either:
         *
         *   1) Non-blocking.
         *   2) Blocking and we know we have a fresh context we don't mind
         *      swapping out if we have to.
         *   3) We are doing this find_and_run_task as part of a help_finish at
         *      an end-finish, and the task we found to execute is also part of
         *      that same finish scope.
         *
         * then execute immediately.
         */
        execute_task(task);
        return NULL;
    } else {
        return task;
    }
}
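Note the chunked steal: locale_steal_task() can return up to STEAL_CHUNK_SIZE tasks at once, of which the worker runs the first and re-schedules the rest locally via rt_schedule_async(), amortizing the synchronization cost of a steal across several tasks.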
Example no. 6
/*lint -save -e641 Fields of E_AvailableTasks have int values 5, 10 and 20. It's safe to convert to int. */
void Timer_Overflow_ServiceRoutine(void)
{
	enable_protection();
	rub_schd_counter++;
	disable_protection();

	if((rub_schd_counter%E_Task_5ms)==0)
	{
		execute_task(E_Task_5ms);
	}
	if((rub_schd_counter%E_Task_10ms)==0)
	{
		execute_task(E_Task_10ms);
	}
	if((rub_schd_counter%E_Task_20ms)==0)
	{
		execute_task(E_Task_20ms);
	}
}
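The lint note says the E_AvailableTasks values are the literal periods 5, 10 and 20, which is what makes the modulo tests work: assuming the overflow interrupt fires once per millisecond, rub_schd_counter % 5 == 0 holds every fifth tick, and so on. A sketch of the assumed declarations:

/* Assumed declarations; values taken from the lint note above. */
typedef enum {
	E_Task_5ms  = 5,   /* run every 5 ticks  */
	E_Task_10ms = 10,  /* run every 10 ticks */
	E_Task_20ms = 20   /* run every 20 ticks */
} E_AvailableTasks;

static volatile unsigned long rub_schd_counter = 0;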
Example no. 7
void gpu_worker(void *arg)
{
	hs_worker *worker_arg = (hs_worker *) arg;

	bind_to_cpu(worker_arg);

	init_cuda(worker_arg->device_id);

//	printf("GPU I am id:%d\n", worker_arg->worker_id);

	pthread_mutex_lock(&worker_arg->mutex);
	worker_arg->initialized = 1;
	pthread_cond_signal(&worker_arg->ready);
	pthread_mutex_unlock(&worker_arg->mutex);

	_task_t task;
	while (is_running())
	{
		lock_queue(worker_arg->task_queue);

		task = pop_task(worker_arg->task_queue);

		if (task == NULL)
		{
			if (is_running())
				sleep_worker(worker_arg);

			unlock_queue(worker_arg->task_queue);
			continue;
		}

		unlock_queue(worker_arg->task_queue);

		if ((task->task->arch_type & worker_arg->arch) != worker_arg->arch)
		{
			push_task(worker_arg->task_queue, task);
			continue;
		}

		execute_task(worker_arg, task);
	}

	deinit_cuda();

	pthread_exit((void*) 0);
}
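Note the architecture check before execution: if the popped task's arch_type mask does not cover this worker's arch bits, the task is pushed back and the loop retries, so a GPU worker simply skips work destined for other device types.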
Example no. 8
void *handle_requests_loop(void *data)
{
  int rs;
  struct task *task;
  struct threadpool * tp = (struct threadpool *)data;

  // Pre-lock mutex
  rs = hthread_mutex_lock(tp->task_queue_mutex);

  while (1) {
    // Check to see if there are any tasks to execute
    if (tp->total_tasks > 0) {
      // If so, then grab one
      task = get_task(tp);
      aprintf("TID %d, got task!\n", hthread_self());

      if (task) {
        // If the task is valid, then release lock
        rs = hthread_mutex_unlock(tp->task_queue_mutex);

        // Execute task
        execute_task(task);
        free(task);

        // Yield to allow another thread to do some work if possible
        hthread_yield();

        // Re-acquire for next round
        rs = hthread_mutex_lock(tp->task_queue_mutex);
      } else {
        // Otherwise, wait for tasks
        rs = hthread_cond_wait(tp->active_task, tp->task_queue_mutex);
      }
    } else {
      // Release lock and processor, let someone else do some work
      hthread_mutex_unlock(tp->task_queue_mutex);
      hthread_yield();

      // Re-acquire
      hthread_mutex_lock(tp->task_queue_mutex);
    }
  }
  return (void*)99;
}
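The loop can hold task_queue_mutex across iterations because hthread_cond_wait, like its pthread counterpart, presumably releases the mutex while sleeping and re-acquires it before returning. The underlying idiom, sketched with plain pthreads and hypothetical globals:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  have_work = PTHREAD_COND_INITIALIZER;
static int total_tasks = 0;

void wait_for_work(void)
{
  pthread_mutex_lock(&lock);
  while (total_tasks == 0)
    pthread_cond_wait(&have_work, &lock); /* atomically unlocks, sleeps, relocks */
  total_tasks--;                          /* claim one unit of work */
  pthread_mutex_unlock(&lock);
}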
Example no. 9
static void core_work_loop(hclib_task_t *starting_task) {
    if (starting_task) {
        execute_task(starting_task);
    }

    uint64_t wid;
    do {
        hclib_worker_state *ws = CURRENT_WS_INTERNAL;
        wid = (uint64_t)ws->id;
        hclib_task_t *must_be_null = find_and_run_task(ws, 1,
                &(hc_context->done_flags[wid].flag), 0, NULL);
        HASSERT(must_be_null == NULL);
    } while (hc_context->done_flags[wid].flag);

    // Jump back to the system thread context for this worker
    hclib_worker_state *ws = CURRENT_WS_INTERNAL;
    HASSERT(ws->root_ctx);
    ctx_swap(get_curr_lite_ctx(), ws->root_ctx, __func__);
    HASSERT(0); // Should never return here
}
Example no. 10
void hclib_yield(hclib_locale_t *locale) {
    hclib_task_t *stolen[STEAL_CHUNK_SIZE];
    hclib_worker_state *ws = CURRENT_WS_INTERNAL;
    finish_t *old_finish = ws->current_finish;
    hclib_task_t *old_task = ws->curr_task;

#ifdef HCLIB_STATS
    worker_stats[ws->id].count_yields++;
#endif

    hclib_task_t *task;
    do {
        ws = CURRENT_WS_INTERNAL;

#ifdef HCLIB_STATS
        worker_stats[ws->id].count_yield_iterations++;
#endif

        task = locale_pop_task(ws);
        if (!task) {
            int victim;
            const int nstolen = locale_steal_task(ws, (void **)stolen, &victim);
            if (nstolen) {
#ifdef HCLIB_STATS
                worker_stats[ws->id].count_steals++;
                worker_stats[ws->id].stolen_tasks += nstolen;
                worker_stats[ws->id].stolen_tasks_per_thread[victim] += nstolen;
#endif
                task = stolen[0];
                for (int i = 1; i < nstolen; i++) {
                    rt_schedule_async(stolen[i], ws);
                }
            }
            // task = locale_steal_task(ws);
        }

        if (task) {
            if (task->non_blocking) {
                execute_task(task);
            } else {
                LiteCtx *currentCtx = get_curr_lite_ctx();
                HASSERT(currentCtx);
                LiteCtx *newCtx = LiteCtx_create(yield_helper);
                newCtx->arg1 = task;
                newCtx->arg2 = locale;
#ifdef HCLIB_STATS
                worker_stats[ws->id].count_ctx_creates++;
#endif
                ctx_swap(currentCtx, newCtx, __func__);

                LiteCtx_destroy(currentCtx->prev);

                /*
                 * This break is necessary to prevent infinite loops. If there
                 * is only a single, yielding, blocking task eligible in the
                 * system you can run into situations where the yield picks it
                 * up, switches contexts, creates a continuation task, executes
                 * the picked-up task, which eventually yields, repeats the
                 * above, and runs this continuation, which then loops back up,
                 * calls the other continuation, and leads to an infinite loop.
                 */
                break;
            }
        }
    } while (task);

    ws = CURRENT_WS_INTERNAL;
    ws->current_finish = old_finish;
    ws->curr_task = old_task;
}
Example no. 11
int KSG_Task_Scheduler::process_task(long time_out)
{
	KSG_Task_Queue *queue = NULL;
	int ret;

L_AGAIN:
	{
		//ACE_GUARD_RETURN(ACE_Recursive_Thread_Mutex,mon,_process_mutex,-1);
		KSG_Task_Schd_Token_Guard guard(_process_mutex);
		ret = guard.acquire_token(NULL);

		if(!guard.is_owner())
			return ret;

		// If no queue is available, the current thread blocks here
		queue = _implement->peek_queue(time_out);
	}
	
	// No lock is needed while checking the result
	if(!queue)
	{
		// waiting for sleeping queue to be wake up
		ACE_DEBUG((LM_TRACE,"调度线程等待被唤醒"));
		ret = _implement->wait_for_wake_up(time_out);
		if(ret>0)
		{
			ACE_DEBUG((LM_TRACE,"等待超时..."));
			return 1;
		}
		else if(ret == 0)
		{
			ACE_DEBUG((LM_TRACE,"调度线程找到执行指令。。。。"));
			goto L_AGAIN;
		}
		else
			return -1;
	}
	ACE_DEBUG((LM_DEBUG,"queue-开始处理任务队列,queue[%s]",queue->queue_name().c_str()));
	// 执行任务
	ret = execute_task(queue);
	if(ret)
	{
		ACE_DEBUG((LM_ERROR,"处理队列queue[%s],返回错误[%d]",queue->queue_name(),ret));
	}

L_FINISH:
	if(queue)
	{
		ACE_DEBUG((LM_DEBUG,"queue-处理完成任务,queue[%s]",queue->queue_name().c_str()));
		KSG_Task_Schd_Token_Guard guard(_process_mutex);
		ret = guard.acquire_token(NULL);
		if(!guard.is_owner())
		{
			ACE_DEBUG((LM_ERROR,"等待归还队列失败!"));
			goto L_FINISH;
		}
		ret = _implement->push_queue(queue);
		if(ret)
			ACE_DEBUG((LM_NOTICE,"处理队列完成,回收队列失败!!!"));
		//ACE_DEBUG((LM_INFO,"执行完成,队列中任务数[%d]",queue->count_of_tasks()));
	}
	return 1;
}