/*
 * do_work() - worker-thread entry point for the thread pool.
 *
 * owning_pool: the thread_pool_t this worker belongs to (passed as void*
 * through pthread_create).
 *
 * Loops forever: waits under pool->mutex until either task queue is
 * non-empty, prefers the emergency queue, runs the task with the mutex
 * released, and exits (removing itself from the worker list) when the pool
 * is shutting down or has more workers than its minimum.
 *
 * Returns NULL (thread exit value is unused).
 */
static void *do_work(void *owning_pool) {
    thread_pool_t *pool =(thread_pool_t *)owning_pool;
    state_t mystate = PRUN;           /* worker-local run state: PRUN until exit is decided */
    task_fun mytask = NULL;
    void *myarg = NULL;
    /* Allow this thread to be cancelled at any point, not just at
     * cancellation points. */
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    /* Ensure the pool mutex is released if the thread is cancelled while
     * holding it.  NOTE(review): casting pthread_mutex_unlock to task_fun
     * assumes compatible calling conventions — works on common ABIs but is
     * formally undefined; confirm task_fun is void(*)(void*). */
    pthread_cleanup_push((task_fun)pthread_mutex_unlock, (void *)&pool->mutex);
    for( ; ; ) {
        pthread_mutex_lock(&pool->mutex);
        /* Sleep while both the normal and emergency queues are empty. */
        while((pool->task.head == NULL) && (pool->emg_task.head == NULL)) {
            /* Shrink the pool (or honor shutdown) only when idle. */
            if(pool->worker.cur > pool->worker.min || pool->state == PEXIT) {
                mystate = PEXIT;
                break;
            }
            pthread_cond_wait(&pool->job_posted, &pool->mutex);
        }
        if(mystate == PEXIT) {
            /* Deregister this worker; mutex is still held here and is
             * released by pthread_cleanup_pop(1) below. */
            del_worker(&pool->worker, pthread_self());
            break;
        }
        //printf("start get task queue\n");
        if(get_task(&pool->emg_task, &mytask, &myarg) == -1) {
            //if emergency queue is null, get normal task
            get_task(&pool->task, &mytask, &myarg);
        }
        /* Wake any producer blocked waiting for queue space. */
        pthread_cond_signal(&pool->job_taken);
        pthread_mutex_unlock(&pool->mutex);
        /* Run the task outside the lock. */
        if(mytask) {
            mytask(myarg);
            mytask = myarg = NULL;
        }
    }
    /* Pop and execute the cleanup handler: unlocks pool->mutex held since
     * the PEXIT break above. */
    pthread_cleanup_pop(1);
    return NULL;
}
/*
 * measurement() - worker thread that drives an HX711 load-cell ADC.
 *
 * thread_handle: the VM thread handle (unused here).
 * user_data:     a handle_details* owning the task mailbox and mutex.
 *
 * Repeatedly fetches the current task, performs a conversion (with an extra
 * discarded conversion cycle for non-A128 gain modes, so the new gain takes
 * effect before the reported reading), delivers the result through the
 * task's callback, and parks the chip in standby between tasks.  A
 * TERMINATE op closes the GPIO handles, reports 0 through the callback and
 * frees the details structure.
 *
 * BUG FIX: every '&current_task' argument had been mangled into the
 * mojibake '¤t_task' (the '&curren' prefix was swallowed as the HTML
 * entity '&curren;'), so the function did not compile.  Restored the
 * intended address-of expressions.
 *
 * Returns 0 (thread exit status).
 */
static VMINT32 measurement(VM_THREAD_HANDLE thread_handle, void* user_data) {
    VMINT measured_result;
    handle_details* details;
    hx711_task current_task;
    write_console("thread enter\n");
    details = (handle_details*) user_data;
    /* Fetch the initial task and put the chip into standby until work arrives. */
    get_task(details, &current_task);
    enter_standby(&current_task);
    while (1) {
        get_task(details, &current_task);
        if (current_task.op == TERMINATE) {
            break;
        }
        if (current_task.op == WAIT) {
            enter_standby(&current_task);
            continue;
        }
        if (current_task.op != A128) {
            /* Non-default gain: run one throwaway conversion so the gain
             * selected by the extra clock pulses applies to the reading we
             * actually report. */
            wait_result(&current_task);
            read_result(&current_task);
            clock_setup_pulses(&current_task);
        }
        wait_result(&current_task);
        measured_result = read_result(&current_task);
        current_task.callback(current_task.callback_env, measured_result);
        enter_standby(&current_task);
    }
    write_console("thread exit\n");
    enter_standby(&current_task);
    /* NOTE(review): this mutex is locked but never unlocked before
     * vm_free(details) releases the structure that contains it — confirm
     * that no other thread can still be blocked on it. */
    vm_mutex_lock(&details->mutex);
    vm_dcl_close(current_task.sda_handle);
    vm_dcl_close(current_task.scl_handle);
    current_task.callback(current_task.callback_env, 0);
    vm_free(details);
    return 0;
}
/*
 * get_statm() - format the /proc/<pid>/statm line for a task.
 *
 * pid:    target process id, resolved via get_task().
 * buffer: output buffer for the formatted text.
 *
 * Walks every VMA of the task's mm (skipping kernel threads, whose mm is
 * &init_mm or NULL) and accumulates page counts per category via
 * statm_pgd_range().  Returns the number of characters written, or 0 if
 * the task does not exist.
 *
 * Output fields: size resident share text lib stack+data dirty.
 */
static int get_statm(int pid, char * buffer) {
    struct task_struct ** p = get_task(pid), *tsk;
    int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0;
    if (!p || (tsk = *p) == NULL)
        return 0;
    if (tsk->mm && tsk->mm != &init_mm) {
        struct vm_area_struct * vma = tsk->mm->mmap;
        while (vma) {
            /* Count pages in [vm_start, vm_end) starting from its pgd slot. */
            pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start);
            int pages = 0, shared = 0, dirty = 0, total = 0;
            statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
            resident += pages;
            share += shared;
            dt += dirty;
            size += total;
            /* Classify the VMA by its flags / address range. */
            if (vma->vm_flags & VM_EXECUTABLE)
                trs += pages;	/* text */
            else if (vma->vm_flags & VM_GROWSDOWN)
                drs += pages;	/* stack */
            else if (vma->vm_end > 0x60000000)
                lrs += pages;	/* library */
            else
                drs += pages;   /* data */
            vma = vma->vm_next;
        }
    }
    return sprintf(buffer,"%d %d %d %d %d %d %d\n",
                   size, resident, share, trs, lrs, drs, dt);
}
/*
 * yield_to() - syscall-style handler: yield the CPU to a specific task.
 *
 * Reads the target pid from the saved EAX of the current task's trap frame
 * (curr_task->esp points at a task_context_t).
 *
 * pid == 0 or out of range: the caller goes to sleep and the scheduler
 * picks the next task.  Otherwise control is handed directly to the target
 * task, which borrows the caller's remaining time slice and remembers the
 * caller as its lender.
 */
void yield_to() {
    task_context_t* stack = (task_context_t*)(curr_task->esp);
    int pid = stack->eax;
    //If we call yield with a target pid of 0 we just want to pass on control, so we sleep and schedule
    if(pid==0 || pid>SCHEDULER_MAX_TASKS) {
        /* NOTE(review): waiting_on normally holds a lock_t*; storing the
         * raw pid here (cast to lock_t*) looks like an in-band marker —
         * confirm how the wakeup path interprets it. */
        curr_task->waiting_on = (lock_t*)pid;
        curr_task->state = TSK_Sleeping;
        //terminal_writestring("Making process sleep\n");
        schedule();
        return;
    }
    task_t* tsk = get_task(pid);
    if(tsk) {
        //terminal_writestring("yielded control from ");
        task_t* ct = get_current_task();
        //terminal_writeuint32(ct->pid);
        /* Hand the caller's time slice to the target and switch to it. */
        tsk->lender_task = ct;
        tsk->state = TSK_Waiting;
        tsk->time_slice = ct->time_slice;
        curr_task = tsk;
        //terminal_writestring("to ");
        //terminal_writeuint32(curr_task->pid);
        //terminal_writestring("\n");
    }
}
/* local functions */ static void alist_process(const acmd_callback_t abi[], unsigned int abi_size) { u32 inst1, inst2; unsigned int acmd; const OSTask_t * const task = get_task(); const unsigned int *alist = (unsigned int*)(rspInfo.RDRAM + task->data_ptr); const unsigned int * const alist_end = alist + (task->data_size >> 2); while (alist != alist_end) { inst1 = *(alist++); inst2 = *(alist++); acmd = inst1 >> 24; if (acmd < abi_size) { (*abi[acmd])(inst1, inst2); } else { rspDebugMessage(M64MSG_WARNING, "Invalid ABI command %u", acmd); } } }
void schedule(unsigned int *stack){ if(active == TRUE){ task_t* cur_task = dequeue_task(); if(cur_task != NULL){ cur_pid = cur_task->pid; dbg_bochs_print("@@@@@@@"); dbg_bochs_print(cur_task->name); if(cur_task->status!=NEW){ cur_task->esp=*stack; } else { cur_task->status=READY; ((task_register_t *)(cur_task->esp))->eip = cur_task->eip; } enqueue_task(cur_task->pid, cur_task); cur_task=get_task(); if(cur_task->status==NEW){ cur_task->status=READY; } dbg_bochs_print(" -- "); dbg_bochs_print(cur_task->name); dbg_bochs_print("\n"); //load_pdbr(cur_task->pdir); *stack = cur_task->esp; } else { enqueue_task(cur_task->pid, cur_task); } } active = FALSE; return; }
/*
 * default_handleTransform() - placeholder transform handler.
 *
 * Instead of running an XSLT transform, it concatenates the input document
 * and the stylesheet into the command's result buffer (sized to hold both
 * plus a terminator).
 *
 * Returns Ok on success or OutOfMemoryError if the buffer cannot be
 * allocated or written.
 */
static EngineState default_handleTransform(Command *command)
{
    INFO("default_handleTransform\n");
    assert(command != NULL);
    assert(command->result != NULL);

    XslTask *task = get_task(command);
    assert(task != NULL);
    assert(task->input_doc != NULL);
    assert(task->xslt_doc != NULL);

    /* Room for both documents plus a trailing NUL. */
    Int32 required = (get_doc_size(task->input_doc)
                      + get_doc_size(task->xslt_doc) + 1);
    DBG("Assigning result buffer of %i\n", required);
    if (!make_result_buffer(required, command)) {
        return OutOfMemoryError;
    }

    char *input = get_doc_buffer(task->input_doc);
    assert(input != NULL);
    char *stylesheet = get_doc_buffer(task->xslt_doc);
    assert(stylesheet != NULL);

    if (!write_result_buffer(input, command)
        || !write_result_buffer(stylesheet, command)) {
        return OutOfMemoryError;
    }
    return Ok;
}
static void pr_ret (const char *fn, unsigned line, const char *fmt, ...) { va_list args; task_s *t; if (!Tau_debug_func) return; lock(); t = get_task(); indent(t->tk_depth); putlog('a' + (t - Task_pool)); putlog(' '); prfn(fn, line); va_start(args, fmt); vformat(fmt, args); va_end(args); log(); if (t->tk_depth) { --t->tk_depth; } else { printk(PR_LEVEL "stack underflow at %s\n", fn); } unlock(); }
static void handle_unknown_task(unsigned int sum) { char filename[256]; const OSTask_t * const task = get_task(); RSP_DEBUG_MESSAGE(M64MSG_WARNING, "unknown OSTask: sum %x PC:%x", sum, *rspInfo.SP_PC_REG); sprintf(&filename[0], "task_%x.log", sum); dump_task(filename, task); // dump ucode_boot sprintf(&filename[0], "ucode_boot_%x.bin", sum); dump_binary(filename, rspInfo.RDRAM + (task->ucode_boot & 0x7fffff), task->ucode_boot_size); // dump ucode if (task->ucode != 0) { sprintf(&filename[0], "ucode_%x.bin", sum); dump_binary(filename, rspInfo.RDRAM + (task->ucode & 0x7fffff), 0xf80); } // dump ucode_data if (task->ucode_data != 0) { sprintf(&filename[0], "ucode_data_%x.bin", sum); dump_binary(filename, rspInfo.RDRAM + (task->ucode_data & 0x7fffff), task->ucode_data_size); } // dump data if (task->data_ptr != 0) { sprintf(&filename[0], "data_%x.bin", sum); dump_binary(filename, rspInfo.RDRAM + (task->data_ptr & 0x7fffff), task->data_size); } }
/*
 * normal_task_dispatching() - identify and run a non-standard OSTask by
 * checksumming its microcode.
 *
 * Computes a checksum over (at most) the first 0xf80 bytes of the task's
 * microcode and dispatches known checksums to the matching emulation
 * routine; anything unrecognized is handed to handle_unknown_task() for
 * dumping.
 */
static void normal_task_dispatching()
{
    const OSTask_t * const task = get_task();
    const unsigned int checksum =
        sum_bytes(rspInfo.RDRAM + task->ucode,
                  min(task->ucode_size, 0xf80) >> 1);

    switch (checksum) {
    case 0x278:
        /* StoreVe12: found in Zelda Ocarina of Time
         * [misleading task->type == 4] — nothing to emulate. */
        return;

    case 0x212ee:
        /* GFX: Twintris [misleading task->type == 0] */
        if (FORWARD_GFX) {
            forward_gfx_task();
            return;
        }
        break;

    case 0x2c85a:
        /* JPEG: found in Pokemon Stadium J */
        jpeg_decode_PS0();
        return;

    case 0x2caa6:
        /* JPEG: found in Zelda Ocarina of Time, Pokemon Stadium 1,
         * Pokemon Stadium 2 */
        jpeg_decode_PS();
        return;

    case 0x130de:
    case 0x278b0:
        /* JPEG: found in Ogre Battle, Bottom of the 9th */
        jpeg_decode_OB();
        return;
    }

    handle_unknown_task(checksum);
}
/*
 * suicide() - mark the current task DEAD and spin until the scheduler
 * reaps it.  Never returns.
 */
void suicide() {
    task_t* self = get_task();
    self->status = DEAD;
    dbg_bochs_print("suicide\n");
    /* Wait here until the scheduler switches away for good. */
    for (;;)
        ;
}
static int get_arg(int pid, char * buffer) { struct task_struct ** p = get_task(pid); if (!p || !*p) return 0; return get_array(p, (*p)->arg_start, (*p)->arg_end, buffer); }
static int get_env(int pid, char * buffer) { struct task_struct ** p = get_task(pid); if (!p || !*p || !(*p)->mm) return 0; return get_array(p, (*p)->mm->env_start, (*p)->mm->env_end, buffer); }
/*
 * add_app() - register a Marathon app (parsed from JSON) in the Mesos
 * state and link its tasks to it.
 *
 * app:          Marathon app JSON object; must carry an "id" field.
 * framework_id: unused.
 *
 * Derives the group id from the app id, adds/replaces the app in that
 * group, copies its labels, and for every entry in app["tasks"] links the
 * already-known Mesos task to this app.  Tasks not yet present in the
 * Mesos state are logged as warnings and skipped.
 *
 * Returns the app pointer on success, or a null ptr_t if the id/group
 * could not be determined or the app could not be added.
 */
marathon_app::ptr_t mesos_state_t::add_app(const Json::Value& app, const std::string& /*framework_id*/)
{
    marathon_app::ptr_t p_app = 0;
    const Json::Value& app_id = app["id"];
    if(!app_id.isNull())
    {
        std::string id = app_id.asString();
        g_logger.log("Adding Marathon app: " + id, sinsp_logger::SEV_DEBUG);
        std::string group_id = marathon_app::get_group_id(id);
        if(!group_id.empty())
        {
            p_app = add_or_replace_app(id, group_id);
            if(p_app)
            {
                // Copy the app's labels, if any, onto the new app object.
                const Json::Value& labels = app["labels"];
                if(!labels.isNull())
                {
                    p_app->set_labels(labels);
                }
                g_logger.log("Added app [" + id + "] to Marathon group: [" + group_id + ']', sinsp_logger::SEV_DEBUG);
                // Link each Marathon task to this app via the Mesos state.
                const Json::Value& tasks = app["tasks"];
                if(tasks.size())
                {
                    g_logger.log("App [" + id + "] has " + std::to_string(tasks.size()) + " tasks.", sinsp_logger::SEV_DEBUG);
                    for(const auto& task : tasks)
                    {
                        Json::Value task_id = task["id"];
                        if(!task_id.isNull())
                        {
                            std::string tid = task_id.asString();
                            g_logger.log("Adding Mesos task ID to app [" + id + "]: " + tid, sinsp_logger::SEV_DEBUG);
                            mesos_framework::task_ptr_t pt = get_task(task_id.asString());
                            if(pt)
                            {
                                pt->set_marathon_app_id(id);
                                add_task_to_app(p_app, tid);
                            }
                            else
                            {
                                // Task may arrive later via a Mesos update.
                                g_logger.log("Marathon task not found in mesos state: " + tid, sinsp_logger::SEV_WARNING);
                            }
                        }
                    }
                }
            }
            else
            {
                g_logger.log("NOT added app [" + id + "] to Marathon group: [" + group_id + ']', sinsp_logger::SEV_ERROR);
            }
        }
        else
        {
            g_logger.log("Could not determine group ID for app: " + id, sinsp_logger::SEV_ERROR);
        }
    }
    return p_app;
}
/*
 * tau_enter() - trace helper: log function entry and push one level onto
 * the per-task call-depth counter.
 *
 * fn: name of the entered function.
 *
 * Returns 1 always (usable inside && chains); no-op unless Tau_debug_func
 * tracing is enabled.
 */
int tau_enter (const char *fn)
{
    task_s *task;

    if (!Tau_debug_func)
        return 1;
    task = get_task();
    ++task->tk_depth;
    tau_pr(fn, 0, "\n");
    return 1;
}
/*
 * thread_body() - main loop of one pool thread.
 *
 * thread_id: index of this thread within the pool.
 *
 * Repeatedly pulls a task, runs it, and reports completion until
 * keep_going is cleared; then notifies the pool via stopped().
 */
void tp_base<task_type>::thread_body(unsigned thread_id)
{
    for (;;) {
        if (!keep_going)
            break;
        auto task = get_task(thread_id);
        task_action(thread_id, task);
        done_task(task);
    }
    stopped(thread_id);
}
void * solve_para(void *arg) { board *b; unsigned int sum = 0; while ((b = get_task())) sum += solve(b); return (void*)((uintptr_t)sum); }
/*
 * luat_resume() - Lua binding: resume a task, passing the remaining stack
 * arguments as a message.
 *
 * Stack: [1] = task handle, [2..top] = message payload.
 *
 * Raises a Lua error if task_resume() fails; otherwise pushes true and
 * returns 1 result.
 */
static int luat_resume(lua_State *L)
{
    lua_Task *task = get_task(L, 1);
    int nargs = lua_gettop(L) - 1;          /* everything after the task */
    message_t *msg = lua_newmessage(L, nargs);
    int rc = task_resume(task->tid, msg);

    if (rc != SUCCESS) {
        return luaL_error(L, "Unable to resume task %d", rc);
    }
    lua_pushboolean(L, 1);
    return 1;
}
/*
 * wait_task() - block until the given task's dependencies are satisfied,
 * helping to drain the task queue in the meantime.
 *
 * NOTE(review): when get_task() keeps returning null while dependencies
 * remain, this loop spins hot — confirm a blocking wait isn't intended.
 */
void TaskManager::wait_task(Task* task)
{
    while (task->m_dependent_tasks) {
        // Make progress on other work instead of idling.
        if (auto* other = get_task()) {
            execute_task(other);
        }
    }
}
/*
 * enqueue_ray_task() - allocate a TASK_RAY task for the given element and
 * place it on queue qid using insertion mode 'mode'.
 */
void enqueue_ray_task(long qid, Element *e, long mode, long process_id)
{
    /* Grab a task object from the per-process pool. */
    Task *task = get_task(process_id) ;
    task->task_type = TASK_RAY ;
    task->task.ray.e = e ;

    /* Hand it to the queue. */
    enqueue_task( qid, task, mode ) ;
}
/*
 * thread_proc() - body of one scheduler worker thread.
 *
 * id: worker index, used only for log messages.
 *
 * Pops scheduled instances off the queue, sleeps until each instance's due
 * time, then looks the schedule up by id and runs it through the handler,
 * rescheduling afterwards.  An empty pop parks the thread on the
 * idle-thread condition.  A thread interruption while sleeping pushes the
 * instance back and either exits (if a stop was requested) or continues.
 */
void simple_scheduler::thread_proc(int id) {
    try {
        schedule_queue_type::value_type instance;
        while (!stop_requested_) {
            instance = queue_.pop();
            if (!instance) {
                // Queue empty: wait until new work is signalled.
                boost::unique_lock<boost::mutex> lock(idle_thread_mutex_);
                idle_thread_cond_.wait(lock);
                continue;
            }
            try {
                // Warn when we are already past the scheduled time.
                boost::posix_time::time_duration off = now() - (*instance).time;
                if (off.total_seconds() > error_threshold_) {
                    log_error("Ran scheduled item " + strEx::s::xtos(instance->schedule_id) + " " + strEx::s::xtos(off.total_seconds()) + " seconds to late from thread " + strEx::s::xtos(id));
                }
                // Absolute-time sleep until the instance is due.
                boost::thread::sleep((*instance).time);
            } catch (boost::thread_interrupted &e) {
                // Interrupted while sleeping: return the instance to the
                // queue so another thread (or this one) can pick it up.
                if (!queue_.push(*instance))
                    log_error("ERROR");
                if (stop_requested_) {
                    log_error("Terminating thread: " + strEx::s::xtos(id));
                    return;
                }
                continue;
            } catch (...) {
                if (!queue_.push(*instance))
                    log_error("ERROR");
                continue;
            }
            boost::posix_time::ptime now_time = now();
            boost::optional<schedules::schedule_object> item = get_task((*instance).schedule_id);
            if (item) {
                try {
                    if (handler_)
                        handler_->handle_schedule(*item);
                    // Reschedule relative to the time we woke up.
                    reschedule(*item,now_time);
                } catch (...) {
                    log_error("UNKNOWN ERROR RUNING TASK: ");
                    reschedule(*item);
                }
            } else {
                // Schedule was removed while we slept.
                log_error("Task not found: " + strEx::s::xtos(instance->schedule_id));
            }
        }
    } catch (const std::exception &e) {
        log_error("Exception in scheduler thread (thread will be killed): " + utf8::utf8_from_native(e.what()));
    } catch (...) {
        log_error("Exception in scheduler thread (thread will be killed)");
    }
}
/**
 * Registers a new thread to the runtime system. This includes
 * initialization of the hardware performance counters
 *
 * parent: tid of the forking task (0 for the very first task).
 * child:  tid of the new task being registered.
 * flags:  SHARE_TASK_GROUP / SHARE_SIGHANDLERS control whether the child
 *         shares or copies the parent's task group and signal handlers.
 */
void rec_sched_register_thread(pid_t parent, pid_t child, int flags)
{
    struct tasklist_entry* entry = sys_malloc_zero(sizeof(*entry));
    struct task* t = &entry->t;

    assert(child > 0 && child < MAX_TID);

    t->status = 0;
    t->rec_tid = t->tid = child;
    /* Open a handle on the child's memory for fast reads/writes. */
    t->child_mem_fd = sys_open_child_mem(child);
    push_placeholder_event(t);
    if (parent) {
        /* Inherit (share or copy, per flags) group and handlers from the
         * parent task. */
        struct task* parent_t = get_task(parent);
        struct sighandlers* parent_handlers = parent_t->sighandlers;

        t->syscallbuf_lib_start = parent_t->syscallbuf_lib_start;
        t->syscallbuf_lib_end = parent_t->syscallbuf_lib_end;
        t->task_group = (SHARE_TASK_GROUP & flags) ?
                        task_group_add_and_ref(parent_t->task_group, t) :
                        task_group_new_and_add(t);
        t->sighandlers = (SHARE_SIGHANDLERS & flags) ?
                         sighandlers_ref(parent_handlers) :
                         sighandlers_copy(parent_handlers);
    } else {
        /* After the first task is forked, we always need to
         * know the parent in order to initialize some task
         * state. */
        static int is_first_task = 1;
        assert(is_first_task);
        is_first_task = 0;
        t->task_group = task_group_new_and_add(t);
        /* The very first task we fork inherits our
         * sighandlers (which should all be default at this
         * point, but ...).  From there on, new tasks will
         * transitively inherit from this first task. */
        t->sighandlers = sighandlers_new();
        sighandlers_init_from_current_process(t->sighandlers);
    }
    /* These will be initialized when the syscall buffer is. */
    t->desched_fd = t->desched_fd_child = -1;

    sys_ptrace_setup(child);

    /* Start counting retired branches for this task. */
    init_hpc(t);
    start_hpc(t, rr_flags()->max_rbc);

    CIRCLEQ_INSERT_TAIL(&head, entry, entries);
    num_active_threads++;
    tid_to_entry[child] = entry;
}
/*
 * enqueue_radavg_task() - allocate a TASK_RAD_AVERAGE task for the given
 * element and place it on queue qid.
 *
 * Note: 'mode' is stored in the task itself; the queue insertion always
 * uses TASK_INSERT (unlike enqueue_ray_task, which passes mode through).
 */
void enqueue_radavg_task(long qid, Element *e, long mode, long process_id)
{
    /* Grab a task object from the per-process pool. */
    Task *task = get_task(process_id) ;
    task->task_type = TASK_RAD_AVERAGE ;
    task->task.rad.e = e ;
    task->task.rad.mode = mode ;

    /* Hand it to the queue. */
    enqueue_task( qid, task, TASK_INSERT ) ;
}
/*
 * on_notify_add_task() - drain the pending-task queue, issuing a
 * get-file-size request for each queued file name.
 *
 * Returns true always; per-file send failures are only logged.
 */
bool TaskManager::on_notify_add_task()
{
    SLOG_DEBUG("Thread[ID=%d,Addr=%x] do task",get_thread_id(), this);

    string file_name;
    while (get_task(file_name)) {
        SLOG_DEBUG("Thread[ID=%d, Addr=%x] receive task=%s", get_thread_id(), this, file_name.c_str());
        if (!send_get_filesize_task(file_name)) {
            SLOG_ERROR("sent get_file_size protocol failed. file_name=%s", file_name.c_str());
        }
    }
    return true;
}
/*
 * get_status() - format the /proc/<pid>/status text for a task.
 *
 * Chains the name/state/mem/signal section formatters, each of which
 * appends to the buffer and returns the new write position.
 *
 * Returns the total number of characters written, or 0 if the pid is
 * unknown.
 */
static int get_status(int pid, char * buffer)
{
    char * start = buffer;
    struct task_struct ** p = get_task(pid), *tsk;

    if (!p || (tsk = *p) == NULL)
        return 0;
    buffer = task_name(tsk, buffer);
    buffer = task_state(tsk, buffer);
    buffer = task_mem(tsk, buffer);
    buffer = task_sig(tsk, buffer);
    return buffer - start;
}
/** * @brief * This function wakes up the task but does not set the condition * So the task will go back to sleep again. * This function is called from Timer Interrupt handler * @param tevent */ void test_wq_noset_condition(struct timer_event* tevent) { void* data = tevent->data; u32 task_id = (u32)data; sw_printf("SW: No Set condition Int Handler\n"); struct sw_task* task = get_task(task_id); sw_printf("SW: Calling wakeup task from Int handler \n"); sw_wakeup(&task->wq_head, WAKE_UP); sw_printf("SW: Called wakeup task from Int handler \n"); tevent->state &= ~TIMER_STATE_EXECUTING; timer_event_destroy(tevent); }
/*
 * attach() - attach the debugger to a process and record it in bad_list.
 *
 * infoPid: pid of the target process.
 *
 * Resolves the task port, rejects invalid and duplicate pids, grows the
 * attach list when full, and appends a zero-initialized interface record.
 *
 * BUG FIXES:
 *  - removed 'bad_list.y[bad_list.x] = malloc(sizeof(interface*));' — the
 *    pointer was immediately overwritten by the tmp assignment below, so
 *    the allocation leaked on every attach;
 *  - realloc result is checked via a temporary instead of clobbering
 *    bad_list.y, so the list survives an allocation failure.
 *
 * Returns the task port, or the macro's error path on failure.
 */
EXPORT mach_port_t attach(pid_t infoPid)
{
    mach_port_t task;
    int count = 0;

    task = get_task(infoPid);
    if(task == 0) {
        int kret = 0;
        RETURN_ON_MACH_ERROR("[-attach] invalid pid", kret);
    }

    if(bad_list.max_attach == 0) {
        bad_list.max_attach = 1;
    }

    /* Refuse to attach twice to the same task. */
    while(count < bad_list.x) {
        if(bad_list.y[count]->task == task) {
            int kret = 0;
            RETURN_ON_MACH_ERROR("[-attach] duplicate pid", kret);
        }
        count++;
    }

    /* Grow the list (doubling) when it is about to overflow. */
    if(bad_list.x >= (bad_list.max_attach - 1)) {
        DEBUG_PRINT("ALLOCATING MORE! CURRENTLY: %d\n", bad_list.max_attach);
        interface **grown = realloc(bad_list.y,
                                    sizeof(interface*) * (bad_list.max_attach * 2));
        if(grown == NULL) {
            int kret = 0;
            RETURN_ON_MACH_ERROR("[-attach] out of memory", kret);
        }
        bad_list.y = grown;
        bad_list.max_attach *= 2;
    }

    interface* tmp = malloc(sizeof(interface));
    if(tmp == NULL) {
        int kret = 0;
        RETURN_ON_MACH_ERROR("[-attach] out of memory", kret);
    }
    memset(tmp, 0, sizeof(interface));
    tmp->task = task;
    tmp->pid = infoPid;
    tmp->current_break = 0;
    tmp->current_exception = 0;
    tmp->single_step = NULL;
    tmp->registered_exception_handler = 0;
    bad_list.y[bad_list.x++] = tmp;

    DEBUG_PRINT("ATTACHING TO PROCESS # %d\n", bad_list.x);
    return task;
}
/*
 * get_statm() - format the /proc/<pid>/statm line by walking the task's
 * page directory directly (very old i386 Linux variant).
 *
 * pid:    target process id, resolved via get_task().
 * buffer: output buffer for the formatted text.
 *
 * Scans the first 0x300 page-directory entries, counting mapped (size),
 * present (resident), shared, text/library/data+stack and dirty pages.
 * Zombies are skipped (their page tables are gone).  Returns the number
 * of characters written, or 0 if the task does not exist.
 */
static int get_statm(int pid, char * buffer) {
    struct task_struct ** p = get_task(pid);
    int i, tpag;
    int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0;
    unsigned long ptbl, *buf, *pte, *pagedir, map_nr;
    if (!p || !*p)
        return 0;
    /* Number of pages covered by the text segment; counted down as we
     * walk so PTEs while tpag > 0 are classified as text. */
    tpag = (*p)->end_code / PAGE_SIZE;
    if ((*p)->state != TASK_ZOMBIE) {
        pagedir = (unsigned long *) (*p)->tss.cr3;
        for (i = 0; i < 0x300; ++i) {
            if ((ptbl = pagedir[i]) == 0) {
                /* Whole page table absent: skip its page span. */
                tpag -= PTRS_PER_PAGE;
                continue;
            }
            buf = (unsigned long *)(ptbl & PAGE_MASK);
            for (pte = buf; pte < (buf + PTRS_PER_PAGE); ++pte) {
                if (*pte != 0) {
                    ++size;
                    if (*pte & 1) {            /* present bit */
                        ++resident;
                        if (tpag > 0)
                            ++trs;             /* text */
                        else
                            ++drs;             /* data/stack */
                        /* Directory slots 15..0x2ef: library region. */
                        if (i >= 15 && i < 0x2f0) {
                            ++lrs;
                            if (*pte & 0x40)   /* dirty bit */
                                ++dt;
                            else
                                --drs;
                        }
                        map_nr = MAP_NR(*pte);
                        /* Page frame referenced more than once: shared. */
                        if (map_nr < (high_memory / PAGE_SIZE) && mem_map[map_nr] > 1)
                            ++share;
                    }
                }
                --tpag;
            }
        }
    }
    return sprintf(buffer,"%d %d %d %d %d %d %d\n",
                   size, resident, share, trs, lrs, drs, dt);
}
/*
 * handle_requests_loop() - worker-thread loop for the hthreads pool.
 *
 * data: the threadpool* this worker serves.
 *
 * Holds the task-queue mutex while inspecting the queue; releases it to
 * execute a task or to yield when the queue is empty; blocks on the
 * active_task condition when the counter says work exists but get_task()
 * returns nothing.  Runs forever (the trailing return is unreachable).
 */
void *handle_requests_loop(void *data)
{
    int rs;
    struct task *task;
    struct threadpool * tp = (struct threadpool *)data;

    // Pre-lock mutex
    rs = hthread_mutex_lock(tp->task_queue_mutex);

    while (1) {
        // Check to see if there are any tasks to execute
        if (tp->total_tasks > 0) {
            // If so, then grab one
            task = get_task(tp);
            aprintf("TID %d, got task!\n",hthread_self());
            if (task) {
                // If the task is valid, then release lock
                rs = hthread_mutex_unlock(tp->task_queue_mutex);

                // Execute task
                execute_task(task);
                free(task);

                // Yield to allow another thread to do some work if possible
                hthread_yield();

                // Re-acquire for next round
                rs = hthread_mutex_lock(tp->task_queue_mutex);
            } else {
                // Otherwise, wait for tasks
                rs = hthread_cond_wait(tp->active_task, tp->task_queue_mutex);
            }
        } else {
            // Release lock and processor, let someone else do some work
            hthread_mutex_unlock(tp->task_queue_mutex);
            hthread_yield();

            // Re-acquire
            hthread_mutex_lock(tp->task_queue_mutex);
        }
    }
    return (void*)99;
}
static int get_statm(int pid, char * buffer) { struct task_struct ** p = get_task(pid); int i, tpag; int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0; unsigned long ptbl, *buf, *pte, *pagedir, map_nr; if (!p || !*p) return 0; tpag = (*p)->end_code / PAGE_SIZE; if ((*p)->state != TASK_ZOMBIE) { pagedir = (void *)((*p)->tss.cr3 + ((*p)->start_code >> 20)); for (i = 0; i < 0x300; ++i) { if ((ptbl = pagedir[i]) == 0) { tpag -= 1024; continue; } buf = (void *)(ptbl & 0xfffff000); for (pte = buf; pte < (buf + 1024); ++pte) { if (*pte != 0) { ++size; if (*pte & 1) { ++resident; if (tpag > 0) ++trs; else ++drs; if (i >= 15 && i < 0x2f0) { ++lrs; if (*pte & 0x40) ++dt; else --drs; } map_nr = MAP_NR(*pte); if (map_nr < (high_memory / 4096) && mem_map[map_nr] > 1) ++share; } } --tpag; } } }