/* 指导语句: #pragma omp for 结构功能: for开始函数(无parallel时) 函数功能: 创建一个任务共享结构 */ int GOMP_loop_dynamic_start (long p1, long p2, long p3, long p4, long *p5, long *p6) { int res = 0; Record_Event Event = Event_init (); Event.event_name = "GOMP_loop_dynamic_start"; Event.eid = 220; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); GOMP_loop_dynamic_start_real = (int(*)(long,long,long,long,long*,long*)) dlsym (RTLD_NEXT, "GOMP_loop_dynamic_start"); if (GOMP_loop_dynamic_start_real != NULL) { Event.starttime = gettime (); res = GOMP_loop_dynamic_start_real (p1, p2, p3, p4, p5, p6); Event.endtime = gettime (); Record (&Event, OMPI_TRACE); } else { printf_d("GOMP_loop_dynamic_start is not hooked! exiting!!\n"); } return res; }
/* 指导语句: #pragma omp sections 结构功能: section调度函数 函数功能: 当一个线程结束其执行的任务时,调用该函数分配下一个任务 */ unsigned GOMP_sections_next (void) { Record_Event Event = Event_init (); Event.event_name = "GOMP_sections_next"; Event.eid = 224; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); unsigned res = 0; GOMP_sections_next_real=(unsigned(*)(void)) dlsym (RTLD_NEXT, "GOMP_sections_next"); if (GOMP_sections_next_real != NULL) { Event.starttime=gettime(); res = GOMP_sections_next_real(); Event.endtime=gettime(); Record(&Event, OMPI_TRACE); } else { printf_d("GOMP_sections_next is not hooked! exiting!!\n"); } return res; }
/* Print a fatal error message to stderr.
 * Formats "[hostname:pid:thread] <message>\n" into a stack buffer, then
 * flushes stderr and emits the buffer with a raw write(2), so the text still
 * reaches the stream even if stdio state is unreliable while crashing. */
void ucs_log_fatal_error(const char *fmt, ...)
{
    size_t buffer_size = ucs_global_opts.log_buffer_size;
    FILE *stream = stderr;
    char *buffer, *p;
    va_list ap;
    int ret;

    /* +1 leaves room for the trailing '\0'. */
    buffer = ucs_alloca(buffer_size + 1);
    p = buffer;

    /* Print hostname:pid */
    snprintf(p, buffer_size, "[%s:%-5d:%d] ", ucs_log_hostname, ucs_log_pid,
             get_thread_num());
    buffer_size -= strlen(p);
    p += strlen(p);

    /* Print rest of the message */
    va_start(ap, fmt);
    vsnprintf(p, buffer_size, fmt, ap);
    va_end(ap);
    buffer_size -= strlen(p);
    p += strlen(p);

    /* Newline */
    snprintf(p, buffer_size, "\n");

    /* Flush stderr, and write the message directly to the pipe */
    fflush(stream);
    ret = write(fileno(stream), buffer, strlen(buffer));
    (void)ret; /* best-effort: nothing useful to do if write() fails here */
}
/* Append a request to the bounded priority queue.
 * Returns IBP_E_QUEUE_FULL when the queue is at capacity, IBP_OK on success.
 * May grow the global worker pool when the backlog exceeds the current
 * number of worker threads. */
int append_request(IBP_REQUESTS_QUEUE * queue, IBP_REQUEST * request)
{
    IBP_SUB_REQ_QUEUE * subQueue=NULL;
    int priority;
    int num;

    /* exclusive access */
    pthread_mutex_lock(queue->lock);
    if ( queue->maxRequests > 0 && queue->requestsNum >= queue->maxRequests ){
        /* queue is full */
        pthread_mutex_unlock(queue->lock);
        return ( IBP_E_QUEUE_FULL);
    }

    /* Requests are bucketed into per-priority sub-queues. */
    priority = get_request_priority(request);
    assert( priority >= 0 && priority < queue->levels);
    subQueue = queue->subQueues[priority];
    append_req_to_subqueue(subQueue,request);
    queue->requestsNum++;
    num = queue->requestsNum;   /* snapshot taken while still holding the lock */
    pthread_mutex_unlock(queue->lock);

    /* Wake one consumer; signaling after unlock lets the woken thread
       acquire the mutex without immediately blocking on us. */
    pthread_cond_signal(queue->cond);

    /* Expand the worker pool when the backlog exceeds the worker count. */
    if ( num > get_thread_num(glbThreadPool) ){
        expand_thread_pool(glbThreadPool,thread_main,5);
    }
    return (IBP_OK);
}
/* Return the calling thread's index within its runtime system.
 * The backend is selected at compile time from the threading model Extrae
 * was built with (OpenMP, SMPSs, Nanos, pthreads, UPC); falls back to
 * get_thread_num() when no specific model is enabled. */
unsigned Extrae_get_thread_number (void)
{
#if defined(OMP_SUPPORT) && !defined(OMPT_INSTRUMENTATION)
    return omp_get_thread_num();
#elif defined(SMPSS_SUPPORT)
    return css_get_thread_num();
#elif defined(NANOS_SUPPORT)
    /* return nanos_extrae_get_thread_num(); */
    return get_thread_num();
#elif defined(PTHREAD_SUPPORT)
    return Backend_GetpThreadIdentifier();
#elif defined(UPC_SUPPORT)
    return GetUPCthreadID();
#else
    return get_thread_num();
#endif
}
/* Directive: #pragma omp for
 * Construct: loop pre-initialization (combined with parallel)
 * Purpose: pre-initialize a work-sharing construct.  Creates/suspends the
 * current implicit task, builds the team, then calls the real
 * GOMP_parallel_loop_guided_start with callme_pardo substituted for the
 * user's outlined loop function, recording a trace event around the call. */
void GOMP_parallel_loop_guided_start(void *p1, void *p2, unsigned p3, long p4, long p5, long p6, long p7)
{
    TaskInfo old_task;
    Record_Event Event = Event_init ();
    Event.event_name = "GOMP_parallel_loop_guided_start";
    Event.eid = 212;
    Event.type = NONE;
    Event.omp_rank = get_thread_num ();
    Event.omp_level = get_level ();
    Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1);

    old_task = current_task;
    //If the current task does not exist, create a new task
    if (current_task.flag == 0)
    {
        current_task = create_itask ();
        Event.task_state_start = TASK_CREATE;
    }
    else
        Event.task_state_start = TASK_SUSPEND;
    create_team (current_task);
    Event.p_task_id_start = current_task.task_parent_id;
    Event.task_id_start = current_task.task_id;

    /* Resolve the next definition of the symbol (the real libgomp one). */
    GOMP_parallel_loop_guided_start_real=(void(*)(void*,void*,unsigned, long, long, long, long)) dlsym (RTLD_NEXT, "GOMP_parallel_loop_guided_start");
    if (GOMP_parallel_loop_guided_start_real != NULL)
    {
        /* Stash the user's outlined function; callme_pardo will invoke it. */
        pardo_uf = (void(*)(void*))p1;
        pardo_uf_id++;
        /*if (PAPI == PAPI_ON) retVal = PAPI_thread_init(get_thread_num()); if (retVal != PAPI_OK) ERROR_RETURN(retVal);*/
        Event.starttime=gettime();
        GOMP_parallel_loop_guided_start_real (callme_pardo, p2, p3, p4, p5, p6, p7);
        Event.endtime=gettime();
    }
    else
    {
        printf_d("GOMP_parallel_loop_guided_start is not hooked! exiting!!\n");
    }

    Event.p_task_id_end = current_task.task_parent_id;
    Event.task_id_end = current_task.task_id;
    if (old_task.flag == 0)
        Event.task_state_end = TASK_START;
    else
        Event.task_state_end = TASK_RESUME;
    Record(&Event, OMPI_TRACE);
}
/* Directive: #pragma omp parallel
 * Construct: parallel user subfunction
 * Purpose: instrumented wrapper around the user's outlined parallel-region
 * function (par_uf).  Each thread creates an implicit task, records a trace
 * event (and PAPI counters when enabled) around the user code, then removes
 * the task and restores the previous one. */
static void callme_par (void *p1)
{
    TaskInfo old_task;
    char fun_name[30] = "Parallel_User_fun_";
    char id [10];
    Record_Event Event = Event_init ();

    old_task = current_task;
    current_task = create_itask ();
    //Is it necessary to add this task to the thread team ?
    add_itask (current_task);

    /* Build a unique event name: "Parallel_User_fun_<id>". */
    itoa (par_uf_id, id);
    strcat (fun_name, id);
    Event.event_name = fun_name;
    Event.eid = 234;
    Event.type = NONE;
    Event.omp_rank = get_thread_num ();
    Event.omp_level = get_level ();
    Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1);
    Event.p_task_id_start = current_task.task_parent_id;
    Event.task_id_start = current_task.task_id;
    Event.task_state_start = TASK_CREATE;

    if (par_uf == NULL)
    {
        printf_d("Error! Invalid initialization of 'par_uf'\n");
        return ;
    }

    if (PAPI == PAPI_ON)
    {
        /* Sample PAPI counters before and after the user function. */
        PAPI_get_info (fun_name, 0, PAPI_THREAD);
        Event.starttime = gettime ();
        par_uf (p1);
        Event.endtime = gettime ();
        PAPI_get_info (fun_name, 1, PAPI_THREAD);
    }
    else
    {
        Event.starttime = gettime ();
        par_uf (p1);
        Event.endtime = gettime ();
    }

    Event.p_task_id_end = current_task.task_parent_id;
    Event.task_id_end = current_task.task_id;
    Event.task_state_end = TASK_END;
    remove_itask (current_task);
    current_task = old_task;
    Record (&Event, OMPI_TRACE);
}
/* 指导语句: #pragma omp task 结构功能: task用户子函数 函数功能: task中调用的用户子函数 */ static void callme_task (void *p1) { TaskInfo old_task; struct eTask *task; char fun_name[30] = "Task_User_do_fun_"; char id [10]; Record_Event Event = Event_init (); old_task = current_task; task = etask_schedule (); current_task = task.task_info; current_task.thread_id = get_thread_id (get_level ()); // current_etask = task; itoa (task_uf_id, id); strcat (fun_name, id); Event.event_name = fun_name; Event.eid = 235; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); Event.p_task_id_start = current_task.task_parent_id; Event.task_id_start = current_task.task_id; Event.task_state_start = TASK_START; if (task_uf == NULL) { printf_d("Error! Invalid initialization of 'task_uf'\n"); return ; } if (PAPI == PAPI_ON) { PAPI_get_info (fun_name, 0, PAPI_THREAD); Event.starttime = gettime (); task_uf (p1); Event.endtime = gettime (); PAPI_get_info (fun_name, 1, PAPI_THREAD); } else { Event.starttime = gettime (); task_uf (p1); Event.endtime = gettime (); } Event.p_task_id_end = current_task.task_parent_id; Event.task_id_end = current_task.task_id; Event.task_state_end = TASK_END; remove_etask (task); current_task = old_task; Record (&Event, OMPI_TRACE); }
/* Default UCS log handler.
 * Formats "<prefix><message>" into a stack buffer and emits one line of the
 * form "[sec.usec] file:line UCX LEVEL msg" to, in order of preference:
 * Valgrind's log when running under Valgrind, the configured log file once
 * logging is initialized, or stdout otherwise.  Flushes the log for
 * error/fatal messages.  Always tells the caller to continue the chain. */
ucs_log_func_rc_t
ucs_log_default_handler(const char *file, unsigned line, const char *function,
                        ucs_log_level_t level, const char *prefix,
                        const char *message, va_list ap)
{
    size_t buffer_size = ucs_global_opts.log_buffer_size;
    const char *short_file;
    struct timeval tv;
    size_t length;
    char *buf;
    char *valg_buf;

    /* Drop messages above the configured verbosity. */
    if (!ucs_log_enabled(level)) {
        return UCS_LOG_FUNC_RC_CONTINUE;
    }

    buf = ucs_alloca(buffer_size + 1);
    buf[buffer_size] = 0;   /* strncpy may leave the buffer unterminated */
    strncpy(buf, prefix, buffer_size);
    length = strlen(buf);
    vsnprintf(buf + length, buffer_size - length, message, ap);

    gettimeofday(&tv, NULL);

    /* Keep only the basename of the source file. */
    short_file = strrchr(file, '/');
    short_file = (short_file == NULL) ? file : short_file + 1;

    if (RUNNING_ON_VALGRIND) {
        valg_buf = ucs_alloca(buffer_size + 1);
        snprintf(valg_buf, buffer_size, "[%lu.%06lu] %16s:%-4u %-4s %-5s %s\n",
                 tv.tv_sec, tv.tv_usec, short_file, line, "UCX",
                 ucs_log_level_names[level], buf);
        VALGRIND_PRINTF("%s", valg_buf);
    } else if (ucs_log_initialized) {
        fprintf(ucs_log_file, "[%lu.%06lu] [%s:%-5d:%d] %16s:%-4u %-4s %-5s %s\n",
                tv.tv_sec, tv.tv_usec, ucs_log_hostname, ucs_log_pid,
                get_thread_num(), short_file, line, "UCX",
                ucs_log_level_names[level], buf);
    } else {
        fprintf(stdout, "[%lu.%06lu] %16s:%-4u %-4s %-5s %s\n",
                tv.tv_sec, tv.tv_usec, short_file, line, "UCX",
                ucs_log_level_names[level], buf);
    }

    /* flush the log file if the log_level of this message is fatal or error */
    if (level <= UCS_LOG_LEVEL_ERROR) {
        ucs_log_flush();
    }

    return UCS_LOG_FUNC_RC_CONTINUE;
}
void GOMP_task (void *p1, void *p2, void *p3,long p4, long p5, _Bool p6, unsigned p7) { struct eTask *task; int old_block = block_statue; Record_Event Event = Event_init (); Event.event_name = "GOMP_task"; Event.eid = 229; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); if (current_task.flag == 1) { Event.p_task_id_start = current_task.task_parent_id; Event.task_id_start = current_task.task_id; Event.task_state_start = TASK_CREATE; } if (p6) { /*Create a task that will be executed immediately*/ block_statue = RUN_AT_ONCE_BLOCK; } else { task = create_etask (); add_etask (task); } GOMP_task_real = (void(*)(void *,void *,void *,long,long,_Bool,unsigned))dlsym(RTLD_NEXT,"GOMP_task"); if(GOMP_task_real != NULL) { task_uf = (void(*)(void*))p1; task_uf_id++; Event.starttime = gettime(); GOMP_task_real (callme_task, p2, p3, p4, p5, p6, p7); Event.endtime = gettime(); } block_statue = old_block; if (current_task.flag == 1) { Event.p_task_id_end = current_task.task_parent_id; Event.task_id_end = current_task.task_id; Event.task_state_end = TASK_RESUME; } Record(&Event, OMPI_TRACE); }
/* Directive: #pragma omp parallel
 * Construct: parallel start function
 * Purpose: initialize a parallel region.  Creates/suspends the current
 * implicit task, builds the team, then calls the real GOMP_parallel_start
 * with callme_par substituted for the user's outlined function, recording a
 * trace event around the call. */
void GOMP_parallel_start (void *p1, void *p2, unsigned p3)
{
    TaskInfo old_task;
    Record_Event Event = Event_init (); //initialize the trace event
    Event.event_name = "GOMP_parallel_start"; //function name
    Event.eid = 200;
    Event.type = NONE;
    Event.omp_rank = get_thread_num (); //thread number
    Event.omp_level = get_level ();
    Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1);

    old_task = current_task;
    //If the current task does not exist, create a new task
    if (current_task.flag == 0)
    {
        current_task = create_itask ();
        Event.task_state_start = TASK_CREATE;
    }
    else
        Event.task_state_start = TASK_SUSPEND;
    create_team (current_task);
    Event.p_task_id_start = current_task.task_parent_id;
    Event.task_id_start = current_task.task_id;

    /* dlsym(RTLD_NEXT) returns the next occurrence of GOMP_parallel_start in
       the link map, i.e. the real libgomp implementation. */
    GOMP_parallel_start_real = (void(*)(void*,void*,unsigned))dlsym (RTLD_NEXT, "GOMP_parallel_start");
    if (GOMP_parallel_start_real != NULL)
    {
        par_uf = (void(*)(void*))p1; //the wrapper callme_par will invoke the user's function
        par_uf_id++;
        Event.starttime = gettime(); //start timestamp
        GOMP_parallel_start_real (callme_par, p2, p3); //call the real libgomp GOMP_parallel_start()
        Event.endtime = gettime (); //end timestamp
    }
    else
    {
        printf_d ("GOMP_parallel_start is not hooked! exiting!!\n");
    }

    Event.p_task_id_end = current_task.task_parent_id;
    Event.task_id_end = current_task.task_id;
    if (old_task.flag == 0)
        Event.task_state_end = TASK_START;
    else
        Event.task_state_end = TASK_RESUME;
    Record (&Event, OMPI_TRACE);
}
int main() { boost::scoped_ptr<thread_data> data_struct(new thread_data()); data_struct->thread_num = 42; hpx::threads::thread_id_type thread_id = hpx::threads::get_self_id(); hpx::threads::set_thread_data(thread_id, reinterpret_cast<std::size_t>(data_struct.get())); HPX_TEST_EQ(get_thread_num(), 42); return hpx::util::report_errors(); }
//Add a new explicit task to the team, waitting to be scheduled. //This function will only be called in Gomp_task () function int add_etask (struct eTask *task) { int i; int team_thread_id = get_thread_id (get_level () - 1); int num = get_thread_num (); struct eTask *parent_task; for (i = 0; i < MAX_TEAM_NUM && i < Team_num; ++i) { if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id) { parent_task = Team [i].etask[num]; task->parent = parent_task; task->children = NULL; task.kind = WAITTING_TASK; if (parent_task->children) { task->next_child = parent_task->children; task->prev_child = parent_task->children->prev_child; task->next_child->prev_child = task; task->prev_child->next_child = task; } else { task->next_child = task; task->prev_child = task; } parent_task->children = task; if (Team [i]->task_queue) { task->next_queue = team->task_queue; task->prev_queue = team->task_queue->prev_queue; task->next_queue->prev_queue = task; task->prev_queue->next_queue = task; } else { task->next_queue = task; task->prev_queue = task; Team [i].task->queue = task; } return 0; } } return -1; }
/* 指导语句: #pragma omp parallel for 结构功能: for调度函数 函数功能: 当一个线程完成指定给它的任务时,调用该函数分配下个任务 */ int GOMP_loop_guided_next(long *p1, long *p2) { int res = 0; Record_Event Event = Event_init (); Event.event_name = "GOMP_loop_guided_next"; Event.eid = 216; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); Event.p_task_id_start = current_task.task_parent_id; Event.task_id_start = current_task.task_id; Event.task_state_start= TASK_END; GOMP_loop_guided_next_real=(int(*)(long*,long*)) dlsym (RTLD_NEXT, "GOMP_loop_guided_next"); if (GOMP_loop_guided_next_real != NULL) { Event.starttime=gettime(); res = GOMP_loop_guided_next_real (p1, p2); Event.endtime=gettime(); Record(&Event, OMPI_TRACE); } else { printf_d("GOMP_loop_guided_next is not hooked! exiting!!\n"); } if (res == 1) //Create a new task for this thread { current_task = create_itask (); Event.p_task_id_end = current_task.task_parent_id; Event.task_id_end= current_task.task_id; Event.task_state_end = TASK_CREATE; } else { current_task = get_current_task (); if (current_task.flag == 1) { Event.p_task_id_end = current_task.task_parent_id; Event.task_id_end= current_task.task_id; Event.task_state_end = TASK_RESUME; } } Record (&Event, OMPI_TRACE); return res; }
/* Search this thread's free list for a "new" node index.
 * Takes the first non-zero slot (clearing it) and returns the node index;
 * asserts if the free list is empty. */
static unsigned int new_node()
{
    int slot;
    int tid = get_thread_num();

    for (slot = 0; slot < MAX_FREELIST; slot++) {
        unsigned int idx = load_32(&free_lists[tid][slot]);
        if (!idx)
            continue;
        store_32(&free_lists[tid][slot], 0);
        return idx;
    }

    /* free_list is empty? */
    MODEL_ASSERT(0);
    return 0;
}
//Add a new implicit task to the team int add_itask (struct iTask task) { int i; int team_thread_id = get_thread_id (get_level () - 1); int thread_num = get_thread_num (); for (i = 0; i < MAX_TEAM_NUM && i < Team_num; ++i) { if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id) { Team [i].itask [thread_num] = task; return 0; } } return -1; }
/* Body of the master "simple spinner" test thread.
 * Clears the counters, publishes the start flag (releasing the worker
 * spinners that wait on it), then busy-increments this thread's counter
 * slot, logging progress every 50000 iterations, until the shared cleanup
 * flag is raised - which this loop itself does once the counter passes
 * 0x00100000.  Finally hands control back via an IPC call to itself.
 * NOTE(review): the start/cleanup flags and counter[] appear to be plain
 * shared globals - confirm they are declared volatile/atomic. */
static void thread_simple_spinner_main (void)
{
    int thrd_num = get_thread_num(L4_Myself());
    clear_counters(5);

    /* Release the worker spinners that wait on this flag. */
    thread_simple_spinner_start = 1;
    while(thread_simple_spinner_cleanup == 0)
    {
        if(counter[thrd_num]++ % 50000 == 0)
        {
            VERBOSE_HIGH("### %d %d\n", (int)thrd_num, (int)counter[thrd_num]);
        }
        if (counter[thrd_num] > 0x00100000)
            thread_simple_spinner_cleanup = 1; // done
    }
    L4_Call(L4_Myself());
}
/* Look up the implicit task currently assigned to this thread in its team.
 * Returns a zero-initialized TaskInfo when the team cannot be found. */
TaskInfo get_current_task ()
{
    TaskInfo result = {0};
    int idx;
    int count = Team_num;
    int team_thread_id = get_thread_id (get_level () - 1);
    int thread_num = get_thread_num ();

    for (idx = 0; idx < MAX_TEAM_NUM && idx < count; ++idx)
    {
        if (Team [idx].task.thread_id == team_thread_id && Team [idx].team_flag == 1)
        {
            result = Team [idx].itask [thread_num];
            break;
        }
    }
    return result;
}
//Remove an implicit task from the thread team int remove_itask (struct iTask task) { int i; int team_thread_id = get_thread_id (get_level () - 1); int thread_num = get_thread_num (); for (i = 0; i < Team_num; ++i) { if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id) { Team [i].itask [thread_num].task_id = -1; Team [i].itask [thread_num].task_parent_id = -1; Team [i].itask [thread_num].flag = 0; return 0; } } return -1; }
/* Worker body for the "simple spinner" test threads.
 * Spins until thread_simple_spinner_main() publishes the start flag, then
 * busy-increments this thread's counter slot, logging every 50000
 * iterations, until the shared cleanup flag is raised by the master thread.
 * Finally hands control back via an IPC call to itself. */
static void thread_simple_spinner (void *arg)
{
    int thrd_num = get_thread_num(L4_Myself());
    int dummy;

    /* Wait for thread_simple_spinner_main() to signal the start. */
    while(thread_simple_spinner_start == 0) ;

    while(thread_simple_spinner_cleanup == 0)
    {
        if(counter[thrd_num]++ % 50000 == 0)
        {
            VERBOSE_HIGH("### %d %d\n", (int)thrd_num, (int)counter[thrd_num]);
        }
        if (counter[thrd_num] > 0x00100000)
            dummy = 1; // done
    }
    L4_Call(L4_Myself());
}
/* Hook for #pragma omp barrier.
 * Marks the current task suspended, calls the real GOMP_barrier with
 * block_statue set to BARRIER_BLOCK (so the task scheduler knows why this
 * thread blocks), then restores the previous task/state and records the
 * event. */
void GOMP_barrier (void)
{
    TaskInfo old_task;
    int old_block = block_statue;
    Record_Event Event = Event_init ();
    Event.event_name = "GOMP_barrier";
    Event.eid = 202;
    Event.type = NONE;
    Event.omp_rank = get_thread_num ();
    Event.omp_level = get_level ();
    Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1);

    old_task = current_task;
    if (current_task.flag == 1)
    {
        Event.p_task_id_start = current_task.task_parent_id;
        Event.task_id_start = current_task.task_id;
        Event.task_state_start = TASK_SUSPEND;
    }

    GOMP_barrier_real = (void(*)(void)) dlsym (RTLD_NEXT, "GOMP_barrier");
    /* Tell the task scheduler this thread is blocked at a barrier. */
    block_statue = BARRIER_BLOCK;
    if (GOMP_barrier_real != NULL)
    {
        Event.starttime = gettime();
        GOMP_barrier_real ();
        Event.endtime = gettime ();
    }
    else
        printf_d ("GOMP_barrier is not hooked! exiting!!\n");
    block_statue = old_block;

    current_task = old_task;
    if (current_task.flag == 1)
    {
        Event.p_task_id_end = current_task.task_parent_id;
        Event.task_id_end = current_task.task_id;
        Event.task_state_end = TASK_RESUME;
    }
    Record (&Event, OMPI_TRACE);
}
/* Directive: #pragma omp parallel for
 * Construct: loop end function
 * Purpose: end a work-sharing construct WITHOUT synchronizing the threads.
 * Records the event, then tears down the team and switches back to this
 * thread's implicit task. */
void GOMP_loop_end_nowait (void)
{
    TaskInfo old_task;
    Record_Event Event = Event_init ();
    Event.event_name = "GOMP_loop_end_nowait";
    Event.eid = 222;
    Event.type = NONE;
    Event.omp_rank = get_thread_num ();
    Event.omp_level = get_level ();
    Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1);

    old_task = current_task;
    if (current_task.flag == 1)
    {
        Event.p_task_id_start = current_task.task_parent_id;
        Event.task_id_start = current_task.task_id;
        Event.task_state_start = TASK_WAIT;
    }

    GOMP_loop_end_nowait_real=(void(*)(void)) dlsym (RTLD_NEXT, "GOMP_loop_end_nowait");
    if (GOMP_loop_end_nowait_real != NULL)
    {
        Event.starttime=gettime();
        GOMP_loop_end_nowait_real();
        Event.endtime=gettime();
    }
    else
    {
        printf_d("GOMP_loop_end_nowait is not hooked! exiting!!\n");
    }

    current_task = old_task;
    Event.p_task_id_end = current_task.task_parent_id;
    Event.task_id_end = current_task.task_id;
    Event.task_state_end = TASK_END;
    /* Work-sharing is done: drop the team and resume the implicit task. */
    remove_team ();
    current_task = get_current_task ();
    Record(&Event, OMPI_TRACE);
}
/* Place this node index back on this thread's free list.
 * Writes the node into the first empty slot; asserts if the list is full. */
static void reclaim(unsigned int node)
{
    int slot;
    int tid = get_thread_num();

    /* Don't reclaim NULL node */
    MODEL_ASSERT(node);

    for (slot = 0; slot < MAX_FREELIST; slot++) {
        /* Should never race with our own thread here */
        unsigned int idx = load_32(&free_lists[tid][slot]);

        /* Found empty spot in free list */
        if (idx != 0)
            continue;
        store_32(&free_lists[tid][slot], node);
        return;
    }

    /* free list is full? */
    MODEL_ASSERT(0);
}
void GOMP_taskwait (void) { TaskInfo old_task; int old_block = block_statue; Record_Event Event = Event_init (); Event.event_name = "GOMP_taskwait"; Event.eid = 228; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); old_task = current_task; if (current_task.flag == 1) { Event.p_task_id_start = current_task.task_parent_id; Event.task_id_start = current_task.task_id; Event.task_state_start = TASK_SUSPEND; } GOMP_taskwait_real = (void(*)(void))dlsym(RTLD_NEXT,"GOMP_taskwait"); block_statue = TASKWAIT_BLOCK; if(GOMP_taskwait_real != NULL) { Event.starttime = gettime(); GOMP_taskwait_real (); Event.endtime = gettime (); } block_statue = old_block; current_task = old_task; if (current_task.flag == 1) { Event.p_task_id_end = current_task.task_parent_id; Event.task_id_end = current_task.task_id; Event.task_state_end = TASK_RESUME; } Record (&Event, OMPI_TRACE); }
// Shut down the worker pool.
// Worker threads (get_thread_num() != 0) only decrement the startup counter
// and then block on the startup lock until the owner finishes tearing down.
// The owner (thread 0) makes sure the threads were started, barriers with
// thread 0's handle, asks every other thread to terminate, spins until all
// workers have checked out, releases them, then frees the thread and
// task-queue arrays.
void stop() {
    if (get_thread_num() != 0) {
        // workers don't come here until terminate() has been called
        int nv = Atomic::decrease_nv(&start_counter);

        // wait until all workers reached this step
        // all threads must agree that we are shutting
        // down before we can continue and invoke the
        // destructor
        startup_lock.lock();
        startup_lock.unlock();
        return;
    }

    start_executing(); // make sure threads have been started, or we will wait forever in barrier
    barrier_protocol.barrier(*threads[0]);

    startup_lock.lock();
    for (int i = 1; i < get_num_cpus(); ++i)
        threads[i]->terminate();

    // wait for all threads to join
    while (start_counter != 1)
        Atomic::rep_nop();

    // signal that threads can destruct
    startup_lock.unlock();

    for (int i = 1; i < get_num_cpus(); ++i)
        delete threads[i];
    delete [] threads;
    delete [] task_queues;
}
void GOMP_parallel_sections_start(void *p1, void *p2, unsigned p3, unsigned p4) { Record_Event Event = Event_init (); Event.event_name = "GOMP_parallel_sections_start"; Event.eid = 227; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); GOMP_parallel_sections_start_real=(void(*)(void*,void*,unsigned,unsigned)) dlsym (RTLD_NEXT, "GOMP_parallel_sections_start"); if (GOMP_parallel_sections_start_real != NULL) { Event.starttime=gettime(); GOMP_parallel_sections_start_real (p1, p2, p3, p4); Event.endtime=gettime(); Record(&Event, OMPI_TRACE); } else { printf_d("GOMP_parallel_sections_start is not hooked! exiting!!\n"); } }
void omp_unset_lock (int *p1) { Record_Event Event = Event_init (); Event.event_name = "omp_unset_lock"; Event.eid = 231; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); omp_unset_lock_real = (void(*)(int*)) dlsym (RTLD_NEXT, "omp_unset_lock"); if(omp_unset_lock_real!= NULL) { Event.starttime = gettime (); omp_unset_lock_real (p1); Event.endtime = gettime (); Record (&Event, OMPI_TRACE); } else { printf_d ("omp_unset_lock is not hooked! exiting!!\n"); } }
void GOMP_critical_name_end(void **p1) { Record_Event Event = Event_init (); Event.event_name = "GOMP_critical_name_end"; Event.eid = 204; Event.type = NONE; Event.omp_rank = get_thread_num (); Event.omp_level = get_level (); Event.p_rank = omp_get_ancestor_thread_num (get_level () - 1); GOMP_critical_name_end_real=(void(*)(void**)) dlsym (RTLD_NEXT, "GOMP_critical_name_end"); if (GOMP_critical_name_end_real != NULL) { Event.starttime=gettime(); GOMP_critical_name_end_real (p1); Event.endtime=gettime(); Record(&Event, OMPI_TRACE); } else { printf_d("GOMP_critical_name_end is not hooked! exiting!!\n"); } }
// Print a per-thread hello message tagged with the thread's id.
void omp_thread_func()
{
    const int thread_id = get_thread_num();
    std::cout << "hello from thread " << thread_id << std::endl;
}
struct eTask * etask_schedule () { struct eTask *task = NULL, *parent_task = NULL, *old_task = NULL; int i, num = -1, thread_num = 0; int team_thread_id = get_thread_id (get_level () - 1); for (i = 0; i < MAX_TEAM_NUM && i < Team_num; ++i) { if (Team [i].team_flag == 1 && Team [i].task.thread_id == team_thread_id) { num = i; break; } } if (num == -1) fprintf_d (stderr, "[OMP]Schedule Error!!!Please check the task scheduling strategy!!!\n"); if (block_statue == BARRIER_BLOCK || block_statue == NO_BOLCK) { if (Team [num].task_queue != NULL) { task = Team [num].task_queue; parent_task = task->parent; if (parent_task && parent_task->children == task) parent_task ->children = task->next_child; task->prev_queue->next_queue = task->next_queue; task->next_queue->prev_queue = task->prev_queue; if (task->next_queue != task) Team [num].task_queue = task->next_queue; else Team [num].task_queue = NULL; task->kind = RUNNING_TASK; } if (task) { parent_task = task->parent; if (parent_task) { task->prev_child->next_child = task->next_child; task->next_child->prev_child = task->prev_child; if (parent_task->next_child == task) { if (task->next_child != task) parent_task->children = task->next_child; else parent_task->children = NULL; } } } } else if (block_statue == TASKWAIT_BLOCK) { thread_num = get_thread_num (); old_task = Team [num].etask [thread_num]; if (old_task->children->kind == WAITTING_TASK) { task = old_task->children; old_task->children = task->next_child; task->prev_queue->next_queue = task->next_queue; task->next_queue->prev_queue = task->prev_queue; if (Team [num].task_queue == task) { if (task->next_queue != task) Team [num].task_queue = task->next_queue; else Team [num].task_queue = NULL; } task->kind = RUNNING_TASK; } if (task) { task->prev_child->next_child = task->next_child; task->next_child->prev_child = task->prev_child; if (old_task->children == task) { if (task->next_child != task) old_task->children = task->next_child; else old_task->children = NULL; } } } 
return task; }