/**
 * Remove a task from the scheduler's ready queue.
 *
 * The per-priority queues are doubly-linked lists threaded through
 * NG(m_info) by task id, with NG(m_head)/NG(m_tail) holding the ends.
 * The whole unlink runs inside an atomic section.
 *
 * @param task  the task to cancel
 * @return SUCCESS on removal, EINVAL if the task was never registered,
 *         EALREADY if it is not currently scheduled.
 */
__LINK_C error_t sched_cancel_task(task_t task)
{
    check_structs_are_valid();
    error_t retVal;
    start_atomic();
    uint8_t id = get_task_id(task);
    if(id == NO_TASK)
        retVal = EINVAL;          /* unknown task: nothing to cancel */
    else if(!is_scheduled(id))
        retVal = EALREADY;        /* registered, but not in any queue */
    else
    {
        /* Unlink from the predecessor: either fix the queue head
         * (we were first) or the previous node's next pointer. */
        if (NG(m_info)[id].prev == NO_TASK)
            NG(m_head)[NG(m_info)[id].priority] = NG(m_info)[id].next;
        else
            NG(m_info)[NG(m_info)[id].prev].next = NG(m_info)[id].next;
        /* Unlink from the successor: either fix the queue tail
         * (we were last) or the next node's prev pointer. */
        if (NG(m_info)[id].next == NO_TASK)
            NG(m_tail)[NG(m_info)[id].priority] = NG(m_info)[id].prev;
        else
            NG(m_info)[NG(m_info)[id].next].prev = NG(m_info)[id].prev;
        /* Reset the node so is_scheduled() reports false afterwards. */
        NG(m_info)[id].prev = NO_TASK;
        NG(m_info)[id].next = NO_TASK;
        NG(m_info)[id].priority = NOT_SCHEDULED;
        check_structs_are_valid();
        retVal = SUCCESS;
    }
    end_atomic();
    return retVal;
}
/**
 * Register a task with the scheduler.
 *
 * NG(m_index) is kept sorted by task pointer (so get_task_id() can look
 * tasks up); each entry maps to a slot in NG(m_info). The new task is
 * inserted at its sorted position, shifting larger entries up, and takes
 * the next free NG(m_info) slot.
 *
 * @param task  the task to register
 * @return SUCCESS, ENOMEM if all NUM_TASKS slots are taken,
 *         EALREADY if this task is already registered.
 */
__LINK_C error_t sched_register_task(task_t task)
{
    error_t retVal;
    check_structs_are_valid();
    //INT_Disable();
    start_atomic();
    if(NG(num_registered_tasks) >= NUM_TASKS)
        retVal = ENOMEM;
    else if(get_task_id(task) != NO_TASK)
        retVal = EALREADY;
    else
    {
        /* Walk down from the first free slot, shifting entries with a
         * larger task pointer one position up, until the sorted insert
         * point is found (i == 0 means it sorts before everything). */
        for(int i = NG(num_registered_tasks); i >= 0; i--)
        {
            if (i == 0 || ((void*)NG(m_index)[i-1].task) < ((void*)task))
            {
                NG(m_index)[i].task = task;
                /* The m_info slot is the next unused one, regardless of
                 * where the sorted entry landed. */
                NG(m_index)[i].index = NG(num_registered_tasks);
                NG(m_info)[NG(m_index)[i].index].task = task;
                break;
            }
            else
            {
                NG(m_index)[i] = NG(m_index)[i-1];
            }
        }
        NG(num_registered_tasks)++;
        retVal = SUCCESS;
    }
    //INT_Enable();
    end_atomic();
    check_structs_are_valid();
    return retVal;
}
/* JNI bridge: expose the native get_task_id() to
 * es.bsc.cepbatools.extrae.Wrapper.GetTaskID(). */
JNIEXPORT jint JNICALL Java_es_bsc_cepbatools_extrae_Wrapper_GetTaskID(
    JNIEnv *env,
    jclass jc)
{
    /* Neither JNI handle is needed for this query. */
    UNREFERENCED(env);
    UNREFERENCED(jc);

    jint task_id = get_task_id();
    return task_id;
}
/**
 * Create a new task: allocate its control block and stack, copy its
 * name, initialize its stack frame and book-keeping, and attach a
 * one-shot timeout timer.
 *
 * @param name       task name (copied, at most NAME_MAXLENTH chars)
 * @param entry      task entry function
 * @param parameter  argument passed to the entry function
 * @param stackSize  stack size in bytes
 * @param priority   task priority
 * @param tick       initial/remaining tick budget
 * @return pointer to the new task, or NULL when no task id or memory
 *         is available.
 */
task_t *task_create( const char *name, void (*entry)(void *parameter),
                     void *parameter, u16int stackSize, u8int priority,
                     u16int tick )
{
    task_t *task;
    u32int *stackAddr;
    /* BUGFIX: task_id was declared u32int, so the `< 0` failure check
     * below could never trigger. Use a signed type so the error
     * sentinel returned by get_task_id() is actually detectable. */
    int task_id;

    task_id = get_task_id();
    if( task_id < 0 ){
        return NULL;
    }

    task = (task_t *)kmalloc(sizeof(task_t));
    if(task == NULL)
        return NULL;

    stackAddr = (void *)kmalloc(stackSize);
    if( stackAddr == NULL ){
        kfree(task);          /* don't leak the control block */
        return NULL;
    }

    /* BUGFIX: copy at most NAME_MAXLENTH chars but stop at the
     * terminator instead of blindly reading NAME_MAXLENTH bytes from
     * `name` (which over-read shorter strings); pad the rest with NULs
     * (strncpy semantics — still unterminated when name is too long,
     * as before). */
    {
        int temp;
        for( temp = 0; temp < NAME_MAXLENTH && name[temp] != '\0'; temp++ ){
            task->name[temp] = name[temp];
        }
        for( ; temp < NAME_MAXLENTH; temp++ ){
            task->name[temp] = '\0';
        }
    }

    task->task_id = task_id;
    task->entry = (void *)entry;
    task->parameter = parameter;
    task->stack_addr = stackAddr;
    task->stack_size = stackSize;
    task->node.next = NULL;
    task->node.prev = NULL;
    task->init_tick = tick;
    task->remaining_tick = tick;
    task->priority = priority;

    /* Initial SP: 4 bytes below the top of the stack (stack grows
     * down). char* arithmetic instead of the non-portable void*
     * arithmetic used before. */
    task->sp = (void *)( (char *)stackAddr + stackSize - 4 );
    /* Fill the stack with a marker so high-water usage can be probed. */
    memset( (u8int *)task->stack_addr, '?', task->stack_size );
    task->sp = (void *)init_stack(task->sp, task->entry, task->parameter);

    task_table[task->task_id] = 1;   /* mark the id as in use */
    task->timer = timer_create(task->name, task_timeout, task, 0,
                               SET_ONESHOT);
    return task;
}
//Create an implicit task struct struct iTask create_itask () { struct iTask task; task.thread_id = get_thread_id (get_level ()); task.task_id = get_new_task_id (); task.task_parent_id = get_task_id (get_thread_id (get_level () - 1)); task.flag = 1; return task; }
/**
 * Query whether a task is currently queued for execution.
 *
 * @param task  the task to query
 * @return true only when the task is registered AND scheduled;
 *         unknown tasks yield false.
 */
__LINK_C bool sched_is_scheduled(task_t task)
{
    bool scheduled = false;

    start_atomic();   /* lookup + check must be consistent */
    uint8_t id = get_task_id(task);
    if (id != NO_TASK)
        scheduled = is_scheduled(id);
    end_atomic();

    return scheduled;
}
/**
 * Queue a task on a suitable core and, if that core's worker thread is
 * not running, (re)start it.
 *
 * @param task  callable to execute
 * @return the id assigned to the queued task
 */
std::size_t add_task(TASK task) {
    const std::size_t core = get_core_id();  // Pick the core to run on.
    const std::size_t id   = get_task_id();  // Smallest available id.

    task_pack<TASK> pack;
    pack.id = id;
    pack.task = std::move(task);

    set_result(id, T(), false);              // Reserve a pending-result slot.
    cores_[core].add_task(std::move(pack));

    // Idle core: flag it running and spin up a detached worker thread.
    if (!cores_[core].running) {
        cores_[core].running = true;
        core_threads_[core].reset(
            new std::thread(&Scheduler<T, TASK>::run_core, this, core));
        core_threads_[core]->detach();
    }

    return id;
}
/**
 * Queue a task on a suitable core (void-result scheduler variant) and,
 * if that core's worker thread is not running, (re)start it.
 *
 * @param task  callable to execute
 * @return the id assigned to the queued task
 */
std::size_t add_task(TASK task) {
    const std::size_t core = get_core_id();  // Pick the core to run on.
    const std::size_t id   = get_task_id();  // Smallest available id.

    task_pack<TASK> pack;
    pack.id = id;
    pack.task = std::move(task);

    set_result(id, 0, false);                // Reserve a pending-result slot.
    cores_[core].add_task(std::move(pack));

    // Idle core: flag it running and spin up a detached worker thread.
    // (run_core releases the core's state when the queue drains.)
    if (!cores_[core].running) {
        cores_[core].running = true;
        core_threads_[core].reset(
            new std::thread(&Scheduler<void, TASK>::run_core, this, core));
        core_threads_[core]->detach();
    }

    return id;
}
/**
 * Schedule a task at the given priority.
 *
 * Priorities are inverted: a LOWER numeric value means a HIGHER
 * priority (MAX_PRIORITY is the smallest number, MIN_PRIORITY the
 * largest). The task is appended at the tail of its priority queue.
 *
 * @param task      the task to schedule
 * @param priority  queue to append to (MAX_PRIORITY..MIN_PRIORITY)
 * @return SUCCESS, EINVAL for an unregistered task, ESIZE for an
 *         out-of-range priority, EALREADY if already scheduled.
 */
__LINK_C error_t sched_post_task_prio(task_t task, uint8_t priority)
{
    error_t retVal;
    start_atomic();
    check_structs_are_valid();
    uint8_t task_id = get_task_id(task);
    if(task_id == NO_TASK)
        retVal = EINVAL;
    /* reject priorities outside [MAX_PRIORITY, MIN_PRIORITY] */
    else if(priority > MIN_PRIORITY || priority < MAX_PRIORITY)
        retVal = ESIZE;
    else if (is_scheduled(task_id))
        retVal = EALREADY;
    else
    {
        if(NG(m_head)[priority] == NO_TASK)
        {
            /* empty queue: task becomes both head and tail */
            NG(m_head)[priority] = task_id;
            NG(m_tail)[priority] = task_id;
        }
        else
        {
            /* non-empty queue: link after the current tail */
            NG(m_info)[NG(m_tail)[priority]].next = task_id;
            NG(m_info)[task_id].prev = NG(m_tail)[priority];
            NG(m_tail)[priority] = task_id;
        }
        NG(m_info)[task_id].priority = priority;

        //if our priority is higher than the currently known maximum priority
        if((priority < NG(current_priority)))
            NG(current_priority) = priority;

        check_structs_are_valid();
        retVal = SUCCESS;
    }
    end_atomic();
    check_structs_are_valid();
    return retVal;
}
/* JNI bridge: expose the native get_task_id() to
 * es.bsc.tools.extrae.Wrapper.GetTaskId(). */
JNIEXPORT jint JNICALL Java_es_bsc_tools_extrae_Wrapper_GetTaskId(JNIEnv *env,
                                                                  jclass jc)
{
    /* Neither JNI handle is needed; silence unused-parameter warnings. */
    (void)env;
    (void)jc;

    return get_task_id();
}