/*
 * Pushes a -rtws task to the latest rq if its currently executing
 * task has a lower priority. Only remote rqs are considered here.
 */
static int push_latest_rtws(struct rq *this_rq, struct task_struct *p, int target_cpu)
{
	struct rq *target_rq;
	int ret = 0;

	target_rq = cpu_rq(target_cpu);

	/* We might release rq lock */
	get_task_struct(p);

	printk(KERN_INFO "check preempting other %llu - %llu\n",
	       p->rtws.job.deadline, target_rq->rtws.earliest_dl);

	double_lock_balance(this_rq, target_rq);

	/* TODO: check whether a task was dispatched here in the meantime */
	if (target_rq->rtws.nr_running &&
	    !time_before_rtws(p->rtws.job.deadline, target_rq->rtws.earliest_dl))
		goto unlock;

	set_task_cpu(p, target_cpu);
	activate_task(target_rq, p, ENQUEUE_HEAD);
	ret = 1;

	resched_task(target_rq->curr);

unlock:
	double_unlock_balance(this_rq, target_rq);
	put_task_struct(p);

	return ret;
}
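/*
 * A minimal sketch of the deadline comparison used above. Like the
 * mainline dl_time_before() helper, time_before_rtws() would order two
 * absolute deadlines using wrap-safe signed arithmetic. This signature
 * is an assumption, not taken from the RTWS sources.
 */
static inline int time_before_rtws(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}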
/**
 * @ingroup sys
 *
 * initialize the task manager
 */
int init_tasks()
{
	int tid;

	tasks = NULL;
	task_count = 0;
	task_index = 0;
	ctask = NULL;

	tid = create_task("main");
	activate_task(tid);
	return tid;
}
void user_main()
{
	sem_init(&sem_led, 1);

	// activate_task(&task0, 1, 1);
#ifndef _PERIODIC_TASK
	activate_task(&task1, 1, 1, 0);
	activate_task(&task2, 1, 2, 0);
	activate_task(&task3, 1, 3, 0);
	activate_task(&task4, 1, 4, 0);
#else
	activate_task(&task0_per, 1, 1, 8);
	activate_task(&task1_per, 1, 1, 20);
	activate_task(&task2_per, 1, 2, 15);
	activate_task(&task3_per, 1, 3, 15);

	send_ape_request(&ape1, 3, 0, 0);
	send_ape_request(&ape2, 4, 4, 0);
#endif
}
void VM::activate_thread(Thread* thread) {
  if(thread == globals.current_thread.get()) {
    thread->task(this, globals.current_task.get());
    return;
  }

  /* May have been using Tasks directly. */
  globals.current_thread->task(this, globals.current_task.get());

  queue_thread(globals.current_thread.get());

  thread->sleep(this, Qfalse);
  globals.current_thread.set(thread);

  if(globals.current_task.get() != thread->task()) {
    activate_task(thread->task());
  }
}
/*
 * At this point we know the task is not on its rtws_rq because the timer
 * was running, which means the task has finished its last instance and
 * was waiting for its next activation period.
 */
static enum hrtimer_restart timer_rtws(struct hrtimer *timer)
{
	unsigned long flags;
	struct sched_rtws_entity *rtws_se = container_of(timer,
						struct sched_rtws_entity,
						timer);
	struct task_struct *p = task_of_rtws_se(rtws_se);
	struct rq *rq = task_rq_lock(p, &flags);

	printk(KERN_INFO "**task %d state %ld on rq %d timer fired at time %llu on cpu %d**\n",
	       p->pid, p->state, p->se.on_rq, rq->clock, rq->cpu);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_RTWS (through sched_setscheduler()).
	 */
	if (!rtws_task(p))
		goto unlock;

	WARN_ON(rtws_se->parent);

	/*
	 * To avoid contention on the global rq and reduce some overhead,
	 * when a new task arrives and the local rq is idle, we make sure
	 * it gets inserted on this rq. Otherwise we try to find a
	 * suitable rq for it. This way it only ends up in the global rq
	 * when all rqs are busy and it has the lowest priority (latest
	 * deadline) compared to the running tasks. Nevertheless, since we
	 * are dealing with a new instance, the scheduling parameters must
	 * be updated first.
	 */
	update_task_rtws(rq, rtws_se);

	if (rq->rtws.nr_running && dispatch_rtws(rq, p))
		goto unlock;

	activate_task(rq, p, ENQUEUE_HEAD);
	resched_task(rq->curr);

unlock:
	task_rq_unlock(rq, p, &flags);

	return HRTIMER_NORESTART;
}
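/*
 * A minimal sketch, not from the RTWS sources, of how the per-entity
 * timer above might be set up and armed for the next activation. Only
 * the hrtimer calls are standard kernel API; the next_release argument
 * (the job's absolute release time, in ns) is an assumption.
 */
static void init_timer_rtws(struct sched_rtws_entity *rtws_se)
{
	/* One-time setup, e.g. when the task enters SCHED_RTWS. */
	hrtimer_init(&rtws_se->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	rtws_se->timer.function = timer_rtws;
}

static void start_timer_rtws(struct sched_rtws_entity *rtws_se, u64 next_release)
{
	/* Armed when an instance completes, ahead of its next period. */
	hrtimer_start(&rtws_se->timer, ns_to_ktime(next_release),
		      HRTIMER_MODE_ABS);
}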
/*
 * Tries to push a -rtws task to a "random" idle rq.
 */
static int push_idle_rtws(struct rq *this_rq, struct task_struct *p)
{
	struct rq *target_rq;
	int ret = 0, target_cpu;
	struct cpudl *cp = &this_rq->rd->rtwsc_cpudl;

retry:
	target_cpu = find_idle_cpu_rtws(cp);

	if (target_cpu == -1)
		return 0;

	printk(KERN_INFO "idle cpu %d\n", target_cpu);

	target_rq = cpu_rq(target_cpu);

	/* We might release rq lock */
	get_task_struct(p);

	double_lock_balance(this_rq, target_rq);

	if (unlikely(target_rq->rtws.nr_running)) {
		double_unlock_balance(this_rq, target_rq);
		put_task_struct(p);
		target_rq = NULL;
		goto retry;
	}

	set_task_cpu(p, target_cpu);
	activate_task(target_rq, p, 0);
	ret = 1;

	resched_task(target_rq->curr);

	double_unlock_balance(this_rq, target_rq);
	put_task_struct(p);

	return ret;
}
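/*
 * A minimal sketch of the idle-CPU lookup used above, modeled on the
 * mainline cpudeadline helpers: if the cpudl bookkeeping keeps an
 * up-to-date mask of idle CPUs in free_cpus (as mainline cpudeadline.h
 * does), any set bit is a valid target. Its use here is an assumption
 * about the RTWS patch set; locking is elided for brevity.
 */
static int find_idle_cpu_rtws(struct cpudl *cp)
{
	int cpu = cpumask_any(cp->free_cpus);

	return cpu < nr_cpu_ids ? cpu : -1;
}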
static int pull_task_rtws(struct rq *this_rq)
{
	struct task_struct *p;
	struct sched_rtws_entity *rtws_se;
	struct global_rq *global_rq = this_rq->rtws.global_rq;
	int ret = 0;

	if (!global_rq->nr_running)
		return 0;

	rtws_se = __pick_next_task_rtws(global_rq);

	if (unlikely(!rtws_se))
		return 0;

	p = task_of_rtws_se(rtws_se);
	WARN_ON(!rtws_task(p));

	printk(KERN_INFO "= task %d stolen %d PULLED by cpu %d\n",
	       p->pid, rtws_se->stolen, this_rq->cpu);

	/*
	 * We transfer the task from the global rq to this rq.
	 */
	__dequeue_task_rtws(global_rq, rtws_se);

	if (rtws_se->stolen == this_rq->cpu)
		rtws_se->stolen = -1;

	set_task_cpu(p, this_rq->cpu);
	activate_task(this_rq, p, ENQUEUE_HEAD);
	ret = 1;
	this_rq->rtws.tot_pulls++;

	return ret;
}
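/*
 * A minimal sketch of the global-rq pick used above, assuming the
 * global rq keeps its entities in a deadline-ordered rbtree with a
 * cached leftmost node, as SCHED_DEADLINE's runqueues do. The leftmost
 * and rb_node fields are hypothetical; rb_entry() is standard kernel
 * API.
 */
static struct sched_rtws_entity *__pick_next_task_rtws(struct global_rq *global_rq)
{
	struct rb_node *left = global_rq->leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_rtws_entity, rb_node);
}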
void worker_engine::start_more_tasks() {
    // TODO: make desired_busy_threads a configurable member.
    auto desired_busy_threads = MAX_HARDWARE_THREADS;

    static bool logged_thread_count = false;
    if (!logged_thread_count) {
        logged_thread_count = true;
        xlog("worker_engine will try to keep %1 threads busy.",
             desired_busy_threads);
    }

    auto atc = data->active_thread_count.load();
    if (atc < desired_busy_threads) {
        auto asgn = get_current_assignment();
        if (asgn) {
            assignment::tasklist_t const & readylist =
                    asgn->get_list_by_status(task_status::ts_ready);
            std::list<task const *> to_start;
            for (auto i = readylist.cbegin(); i != readylist.cend(); ++i) {
                task const & t = **i;
                to_start.push_back(&t);
                if (++atc == desired_busy_threads) {
                    break;
                }
            }
            if (!to_start.empty()) {
                lock_guard<mutex> lock(data->tmap_mutex);
                for (auto i : to_start) {
                    auto cmdline = asgn->activate_task(i->get_id());
                    thread * launched = data->launcher(*this, cmdline);
                    thread_task_pair ttpair(launched, i);
                    data->threadmap.insert({ launched->get_id(), ttpair });
                }
                data->active_thread_count += to_start.size();
            }
        }
    }
}
static int steal_pjob_rtws(struct rq *this_rq)
{
	int ret = 0, this_cpu = this_rq->cpu, target_cpu;
	struct task_struct *p;
	struct rq *target_rq;
	struct global_rq *global_rq = this_rq->rtws.global_rq;

	if (global_rq->random) {
		/*
		 * Pseudo-random selection of our victim rq, among
		 * the rqs that currently have stealable pjobs.
		 */
		target_cpu = find_random_stealable_cpu_rtws(&this_rq->rd->rtwss_cpudl,
							    this_rq->cpu);
	} else {
		/*
		 * When not in random mode, we look for the rq holding
		 * the stealable pjob with the earliest deadline.
		 */
		target_cpu = find_earliest_stealable_cpu_rtws(&this_rq->rd->rtwss_cpudl);
	}

	if (target_cpu == -1)
		return 0;

	printk(KERN_INFO "stealable cpu %d\n", target_cpu);

	target_rq = cpu_rq(target_cpu);

	/*
	 * We can potentially drop this_rq's lock in
	 * double_lock_balance, and another CPU could alter this_rq.
	 */
	double_lock_balance(this_rq, target_rq);

	if (unlikely(target_rq->rtws.nr_running <= 1))
		goto unlock;

	if (unlikely(this_rq->rtws.nr_running))
		goto unlock;

	p = pick_next_stealable_pjob_rtws(&target_rq->rtws);

	if (p) {
		WARN_ON(p == target_rq->curr);
		WARN_ON(!p->se.on_rq);
		WARN_ON(!rtws_task(p));

		deactivate_task(target_rq, p, 0);
		p->rtws.stolen = target_cpu;
		set_task_cpu(p, this_cpu);
		activate_task(this_rq, p, 0);
		this_rq->rtws.tot_steals++;

		printk(KERN_INFO "=task %d STOLEN by cpu %d from cpu %d!\n",
		       p->pid, this_cpu, target_cpu);
		ret = 1;
	}

unlock:
	double_unlock_balance(this_rq, target_rq);
	return ret;
}
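/*
 * A minimal sketch of the pseudo-random victim selection used above,
 * assuming the rtwss_cpudl bookkeeping exposes a mask of CPUs that
 * currently hold stealable pjobs (stealable_cpus is a hypothetical
 * field). prandom_u32() is the standard kernel PRNG; the caller's own
 * CPU is excluded, and locking is elided for brevity.
 */
static int find_random_stealable_cpu_rtws(struct cpudl *cp, int this_cpu)
{
	unsigned int nth;
	int cpu, n;
	cpumask_t mask;

	cpumask_copy(&mask, cp->stealable_cpus);
	cpumask_clear_cpu(this_cpu, &mask);

	n = cpumask_weight(&mask);
	if (!n)
		return -1;

	/* Pick the nth set bit, with n chosen uniformly at random. */
	nth = prandom_u32() % n;
	for_each_cpu(cpu, &mask) {
		if (nth-- == 0)
			return cpu;
	}

	return -1;
}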
/**
 * execute a call to a unit
 */
int unit_exec(int lib_id, int index, var_t *ret)
{
	unit_sym_t *us;		// unit's symbol data
	bc_symbol_rec_t *ps;	// program's symbol data
	int my_tid;
	stknode_t udf_rv;

	my_tid = ctask->tid;
	ps = &prog_symtable[index];
	us = &(taskinfo(ps->task_id)->sbe.exec.exptable[ps->exp_idx]);

	switch (ps->type) {
	case stt_variable:
		break;
	case stt_procedure:
		exec_sync_variables(1);
		cmd_call_unit_udp(kwPROC, ps->task_id, us->address, 0);
		activate_task(ps->task_id);
		if (prog_error) {
			gsb_last_error = prog_error;
			taskinfo(my_tid)->error = gsb_last_error;
			return 0;
		}
		bc_loop(2);
		if (prog_error) {
			gsb_last_error = prog_error;
			taskinfo(my_tid)->error = gsb_last_error;
			return 0;
		}
		activate_task(my_tid);
		exec_sync_variables(0);
		break;
	case stt_function:
		exec_sync_variables(1);
		cmd_call_unit_udp(kwFUNC, ps->task_id, us->address, us->vid);
		activate_task(ps->task_id);
		if (prog_error) {
			gsb_last_error = prog_error;
			taskinfo(my_tid)->error = gsb_last_error;
			return 0;
		}
		bc_loop(2);
		if (prog_error) {
			gsb_last_error = prog_error;
			taskinfo(my_tid)->error = gsb_last_error;
			return 0;
		}

		// get the last variable from the stack
		code_pop(&udf_rv, kwTYPE_RET);
		if (udf_rv.type != kwTYPE_RET) {
			err_stackmess();
		} else {
			v_set(ret, udf_rv.x.vdvar.vptr);
			v_free(udf_rv.x.vdvar.vptr);	// free ret-var
			free(udf_rv.x.vdvar.vptr);
		}

		activate_task(my_tid);
		exec_sync_variables(0);
		break;
	}

	return (prog_error == 0);
}
Task* VM::new_task() {
  Task* task = Task::create(this);
  activate_task(task);
  return task;
}