/* Sweep every worker's runnable queue looking for work.
 * For each worker that is RUNNING, try to take one thread from its queue;
 * on success the stolen thread is re-bound to the caller's env and returned.
 * If any worker has entered the FINISH state, the FINISH value itself is
 * returned as an in-band sentinel (cast to myth_thread_t) so the caller can
 * shut down.  Returns NULL when no work was found and nobody has finished. */
myth_thread_t myth_eco_all_task_check(myth_running_env_t env)
{
  int widx;
  myth_thread_t stolen = NULL;
#ifdef MYTH_WS_PROF_DETAIL
  uint64_t t0, t1;
  t0 = myth_get_rdtsc();
#endif
  for (widx = 0; widx < g_worker_thread_num; widx++) {
    myth_running_env_t victim = &g_envs[widx];
    if (victim->c == RUNNING) {
      stolen = myth_queue_take(&victim->runnable_q);
      if (stolen) {
        /* Re-home the stolen thread onto the stealing worker. */
        stolen->env = env;
        return stolen;
      }
    }
    /* Sentinel: a finished worker means the whole run is winding down. */
    if (victim->c == FINISH) {
      return (myth_thread_t)FINISH;
    }
  }
  return NULL;
}
myth_thread_t myth_default_steal_func(int rank) { myth_running_env_t env,busy_env; myth_thread_t next_run=NULL; #ifdef MYTH_WS_PROF_DETAIL uint64_t t0,t1; t0=myth_get_rdtsc(); #endif //Choose a worker thread that seems to be busy env=&g_envs[rank]; busy_env=myth_env_get_first_busy(env); if (busy_env){ //int ws_victim; #if 0 #ifdef MYTH_SCHED_LOOP_DEBUG myth_dprintf("env %p is trying to steal thread from %p...\n",env,busy_env); #endif #endif //ws_victim=busy_env->rank; //Try to steal thread next_run=myth_queue_take(&busy_env->runnable_q); if (next_run){ #ifdef MYTH_SCHED_LOOP_DEBUG myth_dprintf("env %p is stealing thread %p from %p...\n",env,steal_th,busy_env); #endif myth_assert(next_run->status==MYTH_STATUS_READY); //Change worker thread descriptor } } #ifdef MYTH_WS_PROF_DETAIL t1=myth_get_rdtsc(); if (g_sched_prof){ env->prof_data.ws_attempt_count[busy_env->rank]++; if (next_run){ env->prof_data.ws_hit_cycles+=t1-t0; env->prof_data.ws_hit_cnt++; }else{ env->prof_data.ws_miss_cycles+=t1-t0; env->prof_data.ws_miss_cnt++; } } #endif return next_run; }
myth_thread_t myth_eco_steal(int rank) { myth_running_env_t env,busy_env; myth_thread_t next_run=NULL; #ifdef MYTH_WS_PROF_DETAIL uint64_t t0,t1; t0=myth_get_rdtsc(); #endif //Choose a worker thread that seems to be busy env=&g_envs[rank]; if(env->isSleepy == 1) { env->isSleepy = 0; busy_env = &g_envs[env->ws_target]; } else { busy_env=myth_env_get_first_busy(env); } if (busy_env){ myth_assert(busy_env!=env); //int ws_victim; //ws_victim=busy_env->rank; //Try to steal thread next_run=myth_queue_take(&busy_env->runnable_q); if (next_run){ #ifdef MYTH_SCHED_LOOP_DEBUG myth_dprintf("env %p is stealing thread %p from %p...\n",env,steal_th,busy_env); #endif myth_assert(next_run->status==MYTH_STATUS_READY); //Change worker thread descriptor next_run->env=env; } } if(!next_run) { if(busy_env->c == STEALING) { #ifdef MYTH_ECO_TEST if(env->thief_count < 3) { env->thief_count++; return 0; } #endif myth_sleep(); // This line seems not correct, it may occur infinite recursion //return myth_eco_steal(env->rank); return NULL; } else if(busy_env->c == SLEEPING) { MAY_BE_UNUSED int tmp = task_num; next_run = myth_eco_all_task_check(env); if(!next_run){ myth_sleep(); } else { return next_run; } } else if(busy_env->c == RUNNING) { // victim has one task and executing int tmp = task_num; next_run = myth_eco_all_task_check(env); if(!next_run){ myth_sleep_2(tmp); } else { return next_run; } } else if(busy_env->c == FINISH) { return (myth_thread_t)FINISH; } } #ifdef MYTH_WS_PROF_DETAIL t1=myth_get_rdtsc(); if (g_sched_prof){ env->prof_data.ws_attempt_count[busy_env->rank]++; if (next_run){ env->prof_data.ws_hit_cycles+=t1-t0; env->prof_data.ws_hit_cnt++; }else{ env->prof_data.ws_miss_cycles+=t1-t0; env->prof_data.ws_miss_cnt++; } } #endif #ifdef MYTH_ECO_TEST env->thief_count = 0; #endif return next_run; }