Code example #1
File: tasks-fifo.c  Project: RobChrystie/chapel
static void movedTaskWrapper(void* a) {
  movedTaskWrapperDesc_t* pmtwd = (movedTaskWrapperDesc_t*) a;
  if (pmtwd->countRunning)
    chpl_taskRunningCntInc(0, 0);
  (pmtwd->fp)(pmtwd->arg);
  if (pmtwd->countRunning)
    chpl_taskRunningCntDec(0, 0);
  chpl_mem_free(pmtwd, 0, 0);
}
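A sketch of the descriptor layout movedTaskWrapper() appears to assume, inferred only from the field accesses above; the authoritative movedTaskWrapperDesc_t definition lives elsewhere in tasks-fifo.c and may differ:

/* Sketch only: field names taken from the wrapper above; the types are
 * assumptions (chpl_fn_p is the runtime's void (*)(void*) task entry). */
typedef struct {
  chpl_fn_p fp;            /* task body to invoke */
  void*     arg;           /* argument bundle handed to fp */
  chpl_bool countRunning;  /* whether this task is counted as "running" */
} movedTaskWrapperDesc_t;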
Code example #2
File: tasks-qthreads.c  Project: Improbus/CPEG614
void chpl_task_stdModulesInitialized(void)
{
    //
    // It's not safe to call the module code to count the main task as
    // running until after the modules have been initialized.  That's
    // when this function is called, so now count the main task.
    //
    canCountRunningTasks = true;
    chpl_taskRunningCntInc(0, NULL);
}
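To illustrate how the flag set above could be consulted, a minimal hedged sketch follows; maybeCountRunningTask is a hypothetical helper, not part of the Chapel sources, and the real guard sites live elsewhere in the tasking layer:

/* Hypothetical illustration: task-creation paths can check the flag
 * before calling into the module code that maintains the counter. */
static inline void maybeCountRunningTask(void) {
    if (canCountRunningTasks)
        chpl_taskRunningCntInc(0, NULL);
}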
Code example #3
File: chpl-init.c  Project: DawidvC/chapel
//
// Pre-user-code hook
//
// This is called on all locales.  The call on locale 0 is made from the
// compiler-emitted code in chpl_gen_main(), right before we enter user
// code.  The call on non-0 locales is made from chpl_main(), above.
//
void chpl_rt_preUserCodeHook(void) {
  chpl_comm_barrier("pre-user-code hook begin");

  chpl_taskRunningCntReset(0, 0);
  if (chpl_nodeID == 0) {
    chpl_taskRunningCntInc(0, 0);
  }

  //
  // Set up any memory tracking requested.
  //
  chpl_setMemFlags();

  chpl_comm_barrier("pre-user-code hook end");
}
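As a reading aid, the ordering the comment above describes, written out as comments only because the actual call sites are in compiler-generated code and in chpl_main():

/* Intended sequence (illustration only):
 *   locale 0:      chpl_gen_main() -> module init -> chpl_rt_preUserCodeHook() -> user code
 *   other locales: chpl_main()     -> ...         -> chpl_rt_preUserCodeHook()
 * Every locale resets its running-task count; only locale 0 then adds one
 * for the main task, so a program that spawns no tasks reports exactly one
 * running task, on locale 0. */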
Code example #4
File: tasks-fifo.c  Project: ct-clmsn/chapel
static void
taskCallWrapper(void* a) {
  taskCallWrapperDesc_t* ptcwd = (taskCallWrapperDesc_t*) a;
  if (ptcwd->countRunning)
    chpl_taskRunningCntInc(0, 0);

  if (ptcwd->arg_copy != NULL) {
    (ptcwd->fp)(ptcwd->arg_copy);
    chpl_mem_free(ptcwd->arg_copy, 0, 0);
  } else
    (ptcwd->fp)(ptcwd->arg);

  if (ptcwd->countRunning)
    chpl_taskRunningCntDec(0, 0);
  chpl_mem_free(ptcwd, 0, 0);
}
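Analogously, a sketch of the descriptor this wrapper reads, inferred from the field accesses above; the real taskCallWrapperDesc_t is defined elsewhere in this tasks-fifo.c:

/* Sketch only: names from taskCallWrapper() above; types are assumptions. */
typedef struct {
  chpl_fn_p fp;            /* task body to invoke */
  void*     arg;           /* caller-owned argument, used when no copy was made */
  void*     arg_copy;      /* heap copy of the argument; freed here if non-NULL */
  chpl_bool countRunning;  /* whether to adjust the running-task count */
} taskCallWrapperDesc_t;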
Code example #5
File: tasks-qthreads.c  Project: Improbus/CPEG614
static aligned_t chapel_wrapper(void *arg)
{
    chpl_qthread_wrapper_args_t *rarg = arg;
    chpl_qthread_tls_t * data = chpl_qthread_get_tasklocal();

    data->task_filename = rarg->task_filename;
    data->task_lineno = rarg->lineno;
    data->chpl_data = rarg->chpl_data;
    data->lock_filename = NULL;
    data->lock_lineno = 0;

    if (rarg->countRunning) {
        chpl_taskRunningCntInc(0, NULL);
    }

    (*(chpl_fn_p)(rarg->fn))(rarg->args);

    if (rarg->countRunning) {
        chpl_taskRunningCntDec(0, NULL);
    }

    return 0;
}
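The wrapper-argument struct it dereferences can likewise be sketched from usage; the real chpl_qthread_wrapper_args_t is defined in the qthreads tasking shim, and the field types below (in particular fn, which the code casts to chpl_fn_p before calling) are assumptions:

/* Sketch only: fields inferred from chapel_wrapper() above; the chpl_data
 * member is omitted because its type is not visible in this excerpt. */
typedef struct {
    chpl_fn_p   fn;             /* task body; cast to chpl_fn_p at the call site */
    void*       args;           /* argument bundle for fn */
    const char* task_filename;  /* source location copied into task-local data */
    int         lineno;
    chpl_bool   countRunning;   /* whether to count this task as running */
    /* ... chpl_data and any other members ... */
} chpl_qthread_wrapper_args_t;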
Code example #6
File: tasks-fifo.c  Project: ct-clmsn/chapel
void chpl_task_stdModulesInitialized(void) {
  //
  // It's not safe to call the module code to count the main task as
  // running until after the modules have been initialized.
  //
  canCountRunningTasks = true;
  chpl_taskRunningCntInc(0, 0);

  //
  // The task table is implemented in Chapel code in the modules, so
  // we can't use it, and thus can't support task reporting on ^C or
  // deadlock, until the other modules on which it depends have been
  // initialized and the supporting code here is set up.  In this
  // function we're guaranteed that this is true, because it is called
  // only after all the standard module initialization is complete.
  //

  //
  // Register this main task in the task table.
  //
  if (taskreport) {
    thread_private_data_t* tp = chpl_thread_getPrivateData();

    chpldev_taskTable_add(tp->ptask->id,
                          tp->ptask->lineno, tp->ptask->filename,
                          (uint64_t) (intptr_t) tp->ptask);
    chpldev_taskTable_set_active(tp->ptask->id);

    chpl_thread_mutexInit(&taskTable_lock);
  }

  //
  // Now we can do task reporting if the user requested it.
  //
  do_taskReport = taskreport;
}
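For reference, the task-descriptor fields the registration above relies on, inferred from usage; the real structure behind task_pool_p in this version of tasks-fifo.c has many more members, and the types below are assumptions:

/* Sketch only: field names taken from the chpldev_taskTable_add() call above. */
struct task_descriptor_sketch {
  chpl_taskID_t id;        /* task id handed to the task table */
  int           lineno;    /* source location of the task's spawn point */
  const char*   filename;
  /* ... queue links, task function, argument, flags, ... */
};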
Code example #7
File: tasks-fifo.c  Project: jcazzie/chapel
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  //
  // If we're serial, all the tasks have already been executed.
  //
  if (chpl_task_getSerial())
    return;

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->bundle.requested_fn;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->bundle.id);
      chpldev_taskTable_set_active(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    if (child_ptask->bundle.countRunning)
        chpl_taskRunningCntInc(0, 0);

    (*task_to_run_fun)(&child_ptask->bundle);

    if (child_ptask->bundle.countRunning)
        chpl_taskRunningCntDec(0, 0);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->bundle.id);
      chpldev_taskTable_remove(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);

  }
}
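This newer tasks-fifo.c reaches tasks through a bundle member; its relevant fields can be sketched from the reads above and in thread_begin() below. The names come from usage, while the types and the struct's real name are assumptions; the authoritative definition is in the runtime headers:

/* Sketch only. */
struct task_bundle_sketch {
  chpl_fn_p     requested_fn;   /* task body, invoked with &bundle as its argument */
  int           requested_fid;  /* function id reported to task callbacks */
  int           filename;       /* likely a file-name index; type is a guess */
  int           lineno;
  chpl_taskID_t id;
  chpl_bool     is_executeOn;   /* whether the task came from an executeOn */
  chpl_bool     countRunning;   /* whether to adjust the running-task count */
  /* ... */
};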
Code example #8
File: tasks-fifo.c  Project: jcazzie/chapel
//
// When we create a thread it runs this wrapper function, which just
// executes tasks out of the pool as they become available.
//
static void
thread_begin(void* ptask_void) {
  task_pool_p ptask;
  thread_private_data_t *tp;

  tp = (thread_private_data_t*) chpl_mem_alloc(sizeof(thread_private_data_t),
                                               CHPL_RT_MD_THREAD_PRV_DATA,
                                               0, 0);
  chpl_thread_setPrivateData(tp);

  tp->lockRprt = NULL;
  if (blockreport)
    initializeLockReportForThread();

  while (true) {
    //
    // wait for a task to be present in the task pool
    //

    // In revision 22137, we investigated whether it was beneficial to
    // implement this while loop in a hybrid style where, depending on
    // the number of tasks available, idle threads would either yield or
    // wait on a condition variable to wake them.  Through analysis, we
    // realized this could potentially create a case where a thread would
    // become stranded, waiting for a condition signal that would never
    // come.  A potential solution was to keep a count of the threads
    // waiting on the signal, but since the hybrid approach carried a
    // performance impact compared to merely yielding, we decided to
    // return to the simple yield case.
    while (!task_pool_head) {
      if (set_block_loc(0, CHPL_FILE_IDX_IDLE_TASK)) {
        // all other tasks appear to be blocked
        struct timeval deadline, now;
        gettimeofday(&deadline, NULL);
        deadline.tv_sec += 1;
        do {
          chpl_thread_yield();
          if (!task_pool_head)
            gettimeofday(&now, NULL);
        } while (!task_pool_head
                 && (now.tv_sec < deadline.tv_sec
                     || (now.tv_sec == deadline.tv_sec
                         && now.tv_usec < deadline.tv_usec)));
        if (!task_pool_head) {
          check_for_deadlock();
        }
      }
      else {
        do {
          chpl_thread_yield();
        } while (!task_pool_head);
      }

      unset_block_loc();
    }
 
    //
    // Just now the pool had at least one task in it.  Lock and see if
    // there's something still there.
    //
    chpl_thread_mutexLock(&threading_lock);
    if (!task_pool_head) {
      chpl_thread_mutexUnlock(&threading_lock);
      continue;
    }

    //
    // We've found a task to run.
    //

    if (blockreport)
      progress_cnt++;

    //
    // Start the new task: increment the running count and remove the task
    // from the pool.  Also add the task to the task table (a structure in
    // the Chapel runtime that keeps track of currently running tasks, for
    // task reports on deadlock or Ctrl+C).
    //
    ptask = task_pool_head;
    idle_thread_cnt--;
    running_task_cnt++;

    dequeue_task(ptask);

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    tp->ptask = ptask;

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           ptask->bundle.requested_fid,
                           ptask->bundle.filename,
                           ptask->bundle.lineno,
                           ptask->bundle.id,
                           ptask->bundle.is_executeOn);

    if (ptask->bundle.countRunning)
        chpl_taskRunningCntInc(0, 0);

    (ptask->bundle.requested_fn)(&ptask->bundle);

    if (ptask->bundle.countRunning)
        chpl_taskRunningCntDec(0, 0);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           ptask->bundle.requested_fid,
                           ptask->bundle.filename,
                           ptask->bundle.lineno,
                           ptask->bundle.id,
                           ptask->bundle.is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_remove(ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    tp->ptask = NULL;
    chpl_mem_free(ptask, 0, 0);

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    //
    // finished task; decrement running count and increment idle count
    //
    assert(running_task_cnt > 0);
    running_task_cnt--;
    idle_thread_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);
  }
}
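The idle loop's deadline check above packs a struct timeval comparison into one condition; an equivalent standalone helper (hypothetical, not part of the Chapel sources) reads more directly:

#include <stdbool.h>
#include <sys/time.h>

/* True while 'now' is still earlier than 'deadline'; mirrors the inline
 * test in thread_begin(). */
static bool before_deadline(const struct timeval *now,
                            const struct timeval *deadline) {
  return now->tv_sec < deadline->tv_sec
         || (now->tv_sec == deadline->tv_sec
             && now->tv_usec < deadline->tv_usec);
}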