void chpl_task_stdModulesInitialized(void) {
  //
  // The task table is implemented in Chapel code in the modules, so
  // we can't use it, and thus can't support task reporting on ^C or
  // deadlock, until the other modules on which it depends have been
  // initialized and the supporting code here is set up.  In this
  // function we're guaranteed that is true, because it is called only
  // after all the standard module initialization is complete.
  //

  //
  // Register this main task in the task table.
  //
  if (taskreport) {
    thread_private_data_t* tp = chpl_thread_getPrivateData();

    chpldev_taskTable_add(tp->ptask->bundle.id,
                          tp->ptask->bundle.lineno,
                          tp->ptask->bundle.filename,
                          (uint64_t) (intptr_t) tp->ptask);
    chpldev_taskTable_set_active(tp->ptask->bundle.id);

    chpl_thread_mutexInit(&taskTable_lock);
  }

  //
  // Now we can do task reporting if the user requested it.
  //
  do_taskReport = taskreport;
}
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  //
  // If we're serial, all the tasks have already been executed.
  //
  if (chpl_task_getSerial())
    return;

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->fun;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->id);
      chpldev_taskTable_set_active(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    (*task_to_run_fun)(child_ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->id);
      chpldev_taskTable_remove(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);
  }
}
//
// When we create a thread it runs this wrapper function, which just
// executes tasks out of the pool as they become available.
//
static void thread_begin(void* ptask_void) {
  task_pool_p ptask;
  thread_private_data_t* tp;

  tp = (thread_private_data_t*) chpl_mem_alloc(sizeof(thread_private_data_t),
                                               CHPL_RT_MD_THREAD_PRV_DATA,
                                               0, 0);
  chpl_thread_setPrivateData(tp);

  tp->lockRprt = NULL;
  if (blockreport)
    initializeLockReportForThread();

  while (true) {
    //
    // wait for a task to be present in the task pool
    //

    // In revision 22137, we investigated whether it was beneficial to
    // implement this while loop in a hybrid style, where depending on
    // the number of tasks available, idle threads would either yield or
    // wait on a condition variable to waken them.  Through analysis, we
    // realized this could potentially create a case where a thread would
    // become stranded, waiting for a condition signal that would never
    // come.  A potential solution to this was to keep a count of threads
    // that were waiting on the signal, but since there was a performance
    // impact from keeping it as a hybrid as opposed to merely yielding,
    // it was decided that we would return to the simple yield case.
    while (!task_pool_head) {
      if (set_block_loc(0, CHPL_FILE_IDX_IDLE_TASK)) {
        // all other tasks appear to be blocked
        struct timeval deadline, now;
        gettimeofday(&deadline, NULL);
        deadline.tv_sec += 1;
        do {
          chpl_thread_yield();
          if (!task_pool_head)
            gettimeofday(&now, NULL);
        } while (!task_pool_head
                 && (now.tv_sec < deadline.tv_sec
                     || (now.tv_sec == deadline.tv_sec
                         && now.tv_usec < deadline.tv_usec)));
        if (!task_pool_head) {
          check_for_deadlock();
        }
      }
      else {
        do {
          chpl_thread_yield();
        } while (!task_pool_head);
      }

      unset_block_loc();
    }

    //
    // Just now the pool had at least one task in it.  Lock and see if
    // there's something still there.
    //
    chpl_thread_mutexLock(&threading_lock);
    if (!task_pool_head) {
      chpl_thread_mutexUnlock(&threading_lock);
      continue;
    }

    //
    // We've found a task to run.
    //

    if (blockreport)
      progress_cnt++;

    //
    // start new task; increment running count and remove task from pool;
    // also add the task to the task table (the structure in the Chapel
    // runtime that keeps track of currently running tasks for task
    // reports on deadlock or Ctrl+C).
    //
    ptask = task_pool_head;
    idle_thread_cnt--;
    running_task_cnt++;

    dequeue_task(ptask);

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    tp->ptask = ptask;

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    (*ptask->fun)(ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_remove(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    tp->ptask = NULL;
    chpl_mem_free(ptask, 0, 0);

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    //
    // finished task; decrement running count and increment idle count
    //
    assert(running_task_cnt > 0);
    running_task_cnt--;
    idle_thread_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);
  }
}
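The revision-22137 comment in thread_begin alludes to a hybrid scheme in which idle threads sleep on a condition variable, with a count of waiting threads used to avoid stranding a sleeper on a signal that never comes. As a rough illustration only, a minimal pthreads sketch of that alternative might look like the following; every name here (pool_lock, pool_nonempty, waiter_cnt, wait_for_task, add_task) is hypothetical and not part of the Chapel runtime, which instead keeps the simpler yield loop shown above.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-ins; the real pool uses chpl_thread_mutex_t and
 * task_pool_p and has no condition variable at all. */
static pthread_mutex_t pool_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_nonempty = PTHREAD_COND_INITIALIZER;
static void*           pool_head     = NULL;  /* stand-in for task_pool_head */
static int             waiter_cnt    = 0;     /* threads blocked on the condvar */

/* Idle thread: sleep instead of yielding.  Checking the head and waiting
 * under the same lock that producers hold when they enqueue and signal is
 * what prevents the lost-wakeup (stranded thread) case the comment warns
 * about. */
static void* wait_for_task(void) {
  void* t;
  pthread_mutex_lock(&pool_lock);
  while (pool_head == NULL) {
    waiter_cnt++;
    pthread_cond_wait(&pool_nonempty, &pool_lock);
    waiter_cnt--;
  }
  t = pool_head;                /* dequeue elided for brevity */
  pool_head = NULL;
  pthread_mutex_unlock(&pool_lock);
  return t;
}

/* Producer: enqueue under the lock and wake a sleeper only if one exists. */
static void add_task(void* t) {
  pthread_mutex_lock(&pool_lock);
  pool_head = t;                /* enqueue elided for brevity */
  if (waiter_cnt > 0)
    pthread_cond_signal(&pool_nonempty);
  pthread_mutex_unlock(&pool_lock);
}

As the in-code comment notes, the extra bookkeeping and lock traffic of the hybrid approach cost more than it saved relative to plain yielding, which is why the runtime reverted to the yield loop.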
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  // Note: this function needs to tolerate an empty task
  // list.  That will happen for coforalls inside a serial block, say.

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->bundle.requested_fn;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->bundle.id);
      chpldev_taskTable_set_active(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    (*task_to_run_fun)(&child_ptask->bundle);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->bundle.id);
      chpldev_taskTable_remove(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);
  }
}
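This second version of chpl_task_executeTasksInList differs from the earlier one above in that the task's bookkeeping and entry point now live in an embedded argument bundle: the function pointer comes from child_ptask->bundle.requested_fn, the task callbacks additionally receive requested_fid, and the task body is invoked on the bundle itself rather than on a separate arg pointer. The sketch below is only meant to illustrate what such a bundle-at-the-front layout implies; the struct name and field set are assumptions for illustration and do not reproduce the real chpl_task_bundle_t.

#include <stdbool.h>
#include <stdint.h>

typedef void (*task_fn_p)(void*);     /* same shape as chpl_fn_p */

/* Illustrative bundle header only; the real chpl_task_bundle_t has a
 * different, larger field set. */
typedef struct {
  task_fn_p requested_fn;   /* task body; invoked on the bundle itself */
  int32_t   requested_fid;  /* function id passed to the task callbacks */
  int32_t   lineno;
  int32_t   filename;       /* filename table index */
  uint64_t  id;
  bool      is_executeOn;
  /* task-specific arguments follow this header in memory */
} task_bundle_sketch_t;

/* Running a task then reduces to calling the entry point on its bundle,
 * mirroring (*task_to_run_fun)(&child_ptask->bundle) above. */
static void run_task(task_bundle_sketch_t* bundle) {
  (*bundle->requested_fn)(bundle);
}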