Code example #1
File: tasks-fifo.c  Project: ct-clmsn/chapel
static void sync_wait_and_lock(chpl_sync_aux_t *s,
                               chpl_bool want_full,
                               int32_t lineno, int32_t filename) {
  chpl_bool suspend_using_cond;

  chpl_thread_mutexLock(&s->lock);

  // If we're oversubscribing the hardware, we wait on condition variables
  // in order to ensure fairness and thus progress.  If we're not, we
  // can spin-wait.
  suspend_using_cond = (chpl_thread_getNumThreads() >=
                        chpl_getNumLogicalCpus(true));

  while (s->is_full != want_full) {
    if (!suspend_using_cond) {
      chpl_thread_mutexUnlock(&s->lock);
    }
    if (set_block_loc(lineno, filename)) {
      // all other tasks appear to be blocked
      struct timeval deadline, now;
      chpl_bool timed_out = false;
      // timed_out defaults to false so that, when we aren't using condition
      // variables, the !timed_out test below always passes

      gettimeofday(&deadline, NULL);
      deadline.tv_sec += 1;
      do {
        if (suspend_using_cond)
          timed_out = chpl_thread_sync_suspend(s, &deadline);
        else
          chpl_thread_yield();
        
        if (s->is_full != want_full && !timed_out)
          gettimeofday(&now, NULL);
      } while (s->is_full != want_full
               && !timed_out
               && (now.tv_sec < deadline.tv_sec
                   || (now.tv_sec == deadline.tv_sec
                       && now.tv_usec < deadline.tv_usec)));
      if (s->is_full != want_full)
        check_for_deadlock();
    }
    else {
      do {
        if (suspend_using_cond)
          (void) chpl_thread_sync_suspend(s, NULL);
        else
          chpl_thread_yield();
      } while (s->is_full != want_full);
    }
    unset_block_loc();
    if (!suspend_using_cond)
      chpl_thread_mutexLock(&s->lock);
  }

  if (blockreport)
    progress_cnt++;
}
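
For reference, the sync-variable wait entry points in this file are presumably thin wrappers over the helper above. A minimal sketch, assuming wrappers named chpl_sync_waitFullAndLock and chpl_sync_waitEmptyAndLock delegate to it:

// Sketch (assumption): the public wait entry points just forward to
// sync_wait_and_lock and return with the sync variable's lock held.
void chpl_sync_waitFullAndLock(chpl_sync_aux_t *s,
                               int32_t lineno, int32_t filename) {
  sync_wait_and_lock(s, true, lineno, filename);   // wait until full
}

void chpl_sync_waitEmptyAndLock(chpl_sync_aux_t *s,
                                int32_t lineno, int32_t filename) {
  sync_wait_and_lock(s, false, lineno, filename);  // wait until empty
}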
Code example #2
File: tasks-fifo.c  Project: ct-clmsn/chapel
//
// Inform task management that the thread (task) is about to suspend
// waiting for a sync or single variable to change state or the task
// pool to become nonempty.  The return value is true if the program
// may be deadlocked, indicating that the thread should use a timeout
// deadline on its suspension if possible, and false otherwise.
//
static chpl_bool set_block_loc(int lineno, int32_t filename) {
  thread_private_data_t* tp;
  chpl_bool isLastUnblockedThread;

  if (!blockreport)
    return false;

  isLastUnblockedThread = false;

  tp = get_thread_private_data();
  tp->lockRprt->filename = filename;
  tp->lockRprt->lineno = lineno;
  tp->lockRprt->prev_progress_cnt = progress_cnt;
  tp->lockRprt->maybeLocked = true;

  // Begin critical section
  chpl_thread_mutexLock(&block_report_lock);

  blocked_thread_cnt++;
  if (blocked_thread_cnt >= chpl_thread_getNumThreads()) {
    isLastUnblockedThread = true;
  }

  // End critical section
  chpl_thread_mutexUnlock(&block_report_lock);

  return isLastUnblockedThread;
}
Code example #3
File: tasks-fifo.c  Project: RobChrystie/chapel
void chpl_task_startMovedTask(chpl_fn_p fp,
                              void* a,
                              c_sublocid_t subloc,
                              chpl_taskID_t id,
                              chpl_bool serial_state) {
  movedTaskWrapperDesc_t* pmtwd;
  chpl_task_prvDataImpl_t private = {
    .prvdata = { .serial_state = serial_state } };

  assert(subloc == 0 || subloc == c_sublocid_any);
  assert(id == chpl_nullTaskID);

  pmtwd = (movedTaskWrapperDesc_t*)
          chpl_mem_alloc(sizeof(*pmtwd),
                         CHPL_RT_MD_THREAD_PRV_DATA,
                         0, 0);
  *pmtwd = (movedTaskWrapperDesc_t)
           { fp, a, canCountRunningTasks,
             private };

  // begin critical section
  chpl_thread_mutexLock(&threading_lock);

  (void) add_to_task_pool(movedTaskWrapper, pmtwd, true, pmtwd->chpl_data,
                          NULL, false, 0, CHPL_FILE_IDX_UNKNOWN);

  // end critical section
  chpl_thread_mutexUnlock(&threading_lock);
}
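
The descriptor built here packs the requested function, its argument, the running-task-count flag, and the task-private data; the pool entry then runs movedTaskWrapper on it. That wrapper's body is not among these excerpts, so the following is only a hypothetical sketch of how such a trampoline could consume the descriptor, with field names assumed from the initializer order above:

// Hypothetical sketch only -- not the runtime's movedTaskWrapper.
// Unpacks the heap-allocated descriptor, runs the moved task's body,
// and frees the descriptor.  Field names (fp, arg) are assumptions.
static void movedTaskWrapper_sketch(void* a) {
  movedTaskWrapperDesc_t* pmtwd = (movedTaskWrapperDesc_t*) a;
  (*pmtwd->fp)(pmtwd->arg);      // run the moved task's body function
  chpl_mem_free(pmtwd, 0, 0);    // allocated in chpl_task_startMovedTask
}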
Code example #4
File: tasks-fifo.c  Project: bollu/chapel
void chpl_task_addToTaskList(chpl_fn_int_t fid,
                             chpl_task_bundle_t* arg, size_t arg_size,
                             c_sublocid_t subloc,
                             void** p_task_list_void,
                             int32_t task_list_locale,
                             chpl_bool is_begin_stmt,
                             int lineno,
                             int32_t filename) {
  assert(subloc == c_sublocid_any);

  // begin critical section
  chpl_thread_mutexLock(&threading_lock);

  if (task_list_locale == chpl_nodeID) {
    (void) add_to_task_pool(fid, chpl_ftable[fid], arg, arg_size,
                            false, (task_pool_p*) p_task_list_void,
                            is_begin_stmt, lineno, filename);

  }
  else {
    //
    // is_begin_stmt should be true here because if task_list_locale !=
    // chpl_nodeID, then this function could not have been called from
    // the context of a cobegin or coforall statement.
    //
    assert(is_begin_stmt);
    (void) add_to_task_pool(fid, chpl_ftable[fid], arg, arg_size,
                            false, NULL, true, 0, CHPL_FILE_IDX_UNKNOWN);
  }

  // end critical section
  chpl_thread_mutexUnlock(&threading_lock);
}
Code example #5
File: tasks-fifo.c  Project: ct-clmsn/chapel
//
// Get a new task ID.
//
static chpl_taskID_t get_next_task_id(void) {
  static chpl_taskID_t       id = chpl_nullTaskID + 1;

  chpl_taskID_t              next_id;

  chpl_thread_mutexLock(&task_id_lock);
  next_id = id++;
  chpl_thread_mutexUnlock(&task_id_lock);

  return next_id;
}
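
As a design note, the lock above guards nothing but a single counter increment; with C11 atomics the same ID generator could be written without a mutex. A lock-free sketch for illustration only (not the runtime's code; it assumes a plain 64-bit counter in place of chpl_taskID_t, with 0 reserved as the null ID):

#include <stdatomic.h>
#include <stdint.h>

// Illustrative alternative: an atomic fetch-and-add replaces the
// mutex-protected increment.  Starts at 1, reserving 0 as "no task".
static atomic_uint_least64_t next_id_sketch = 1;

static uint_least64_t get_next_task_id_sketch(void) {
  return atomic_fetch_add(&next_id_sketch, 1);
}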
Code example #6
File: tasks-fifo.c  Project: ct-clmsn/chapel
int32_t  chpl_task_getNumBlockedTasks(void) {
  if (blockreport) {
    int numBlockedTasks;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);
    chpl_thread_mutexLock(&block_report_lock);

    numBlockedTasks = blocked_thread_cnt - idle_thread_cnt;

    // end critical section
    chpl_thread_mutexUnlock(&block_report_lock);
    chpl_thread_mutexUnlock(&threading_lock);

    assert(numBlockedTasks >= 0);
    return numBlockedTasks;
  }
  else
    return 0;
}
Code example #7
File: tasks-fifo.c  Project: ct-clmsn/chapel
// create a task from the given function pointer and arguments
// and append it to the end of the task pool
// assumes threading_lock has already been acquired!
static inline
task_pool_p add_to_task_pool(chpl_fn_p fp,
                             void* a,
                             chpl_bool is_executeOn,
                             chpl_task_prvDataImpl_t chpl_data,
                             task_pool_p* p_task_list_head,
                             chpl_bool is_begin_stmt,
                             int lineno, int32_t filename) {
  task_pool_p ptask =
    (task_pool_p) chpl_mem_alloc(sizeof(task_pool_t),
                                        CHPL_RT_MD_TASK_POOL_DESC,
                                        0, 0);
  ptask->id           = get_next_task_id();
  ptask->fun          = fp;
  ptask->arg          = a;
  ptask->is_executeOn = is_executeOn;
  ptask->chpl_data    = chpl_data;
  ptask->filename     = filename;
  ptask->lineno       = lineno;
  ptask->p_list_head  = NULL;
  ptask->next         = NULL;

  enqueue_task(ptask, p_task_list_head);

  chpl_task_do_callbacks(chpl_task_cb_event_kind_create,
                         ptask->filename,
                         ptask->lineno,
                         ptask->id,
                         ptask->is_executeOn);

  if (do_taskReport) {
    chpl_thread_mutexLock(&taskTable_lock);
    chpldev_taskTable_add(ptask->id,
                          ptask->lineno, ptask->filename,
                          (uint64_t) (intptr_t) ptask);
    chpl_thread_mutexUnlock(&taskTable_lock);
  }

  //
  // If we now have more tasks than threads to run them on (taking
  // into account that the current parent of a structured parallel
  // construct can run at least one of that construct's children),
  // try to start another thread.
  //
  if (queued_task_cnt > idle_thread_cnt &&
      (p_task_list_head == NULL || ptask->list_next != NULL || is_begin_stmt)) {
    maybe_add_thread();
  }

  return ptask;
}
Code example #8
File: tasks-fifo.c  Project: bollu/chapel
static inline
void taskCallBody(chpl_fn_int_t fid, chpl_fn_p fp,
                  chpl_task_bundle_t* arg, size_t arg_size,
                  c_sublocid_t subloc,
                  int lineno, int32_t filename) {
  // begin critical section
  chpl_thread_mutexLock(&threading_lock);

  (void) add_to_task_pool(fid, fp, arg, arg_size, true,
                          NULL, false, lineno, filename);

  // end critical section
  chpl_thread_mutexUnlock(&threading_lock);
}
Code example #9
File: tasks-fifo.c  Project: ct-clmsn/chapel
//
// Inform task management that the thread (task) is no longer suspended.
//
static void unset_block_loc(void) {
  if (!blockreport)
    return;

  get_thread_private_data()->lockRprt->maybeLocked = false;

  // Begin critical section
  chpl_thread_mutexLock(&block_report_lock);

  blocked_thread_cnt--;

  // End critical section
  chpl_thread_mutexUnlock(&block_report_lock);
}
Code example #10
File: tasks-fifo.c  Project: ct-clmsn/chapel
void chpl_task_addToTaskList(chpl_fn_int_t fid, void* arg,
                             c_sublocid_t subloc,
                             void** p_task_list_void,
                             int32_t task_list_locale,
                             chpl_bool is_begin_stmt,
                             int lineno,
                             int32_t filename) {
  task_pool_p curr_ptask = get_current_ptask();
  chpl_task_prvDataImpl_t chpl_data =
    { .prvdata = { .serial_state = curr_ptask->chpl_data.prvdata.serial_state }
    };

  assert(subloc == 0 || subloc == c_sublocid_any);

  if (chpl_data.prvdata.serial_state) {
    (*chpl_ftable[fid])(arg);
    return;
  }

  // begin critical section
  chpl_thread_mutexLock(&threading_lock);

  if (task_list_locale == chpl_nodeID) {
    (void) add_to_task_pool(chpl_ftable[fid], arg, false, chpl_data,
                            (task_pool_p*) p_task_list_void, is_begin_stmt,
                            lineno, filename);

  }
  else {
    //
    // is_begin_stmt should be true here because if task_list_locale !=
    // chpl_nodeID, then this function could not have been called from
    // the context of a cobegin or coforall statement.
    //
    assert(is_begin_stmt);
    (void) add_to_task_pool(chpl_ftable[fid], arg, false, chpl_data,
                            NULL, true, 0, CHPL_FILE_IDX_UNKNOWN);
  }

  // end critical section
  chpl_thread_mutexUnlock(&threading_lock);
}
Code example #11
File: tasks-fifo.c  Project: ct-clmsn/chapel
//
// This function should be called exactly once per thread (not task!),
// including the main thread.  It should be called before the thread
// starts the first task it was created to run.
//
// Our handling of lock report list entries could be improved.  We
// allocate one each time this function is called, and this is called
// just before each task wrapper is called.  We never remove these
// from the list or deallocate them.  If we do traverse the list while
// reporting a deadlock, we just skip the leaked ones, because they
// don't say "blocked".
//
static void initializeLockReportForThread(void) {
  lockReport_t* newLockReport;

  newLockReport = (lockReport_t*) chpl_mem_alloc(sizeof(lockReport_t),
                                                 CHPL_RT_MD_LOCK_REPORT_DATA,
                                                 0, 0);
  newLockReport->maybeLocked = false;
  newLockReport->next = NULL;

  get_thread_private_data()->lockRprt = newLockReport;

  // Begin critical section
  chpl_thread_mutexLock(&block_report_lock);
  if (lockReportHead) {
    lockReportTail->next = newLockReport;
    lockReportTail = newLockReport;
  } else {
    lockReportHead = newLockReport;
    lockReportTail = newLockReport;
  }
  // End critical section
  chpl_thread_mutexUnlock(&block_report_lock);
}
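
Given the comment above, a deadlock report can walk this list and ignore entries that are not currently marked blocked. The runtime's check_for_deadlock is not among these excerpts; the following is only a sketch of the traversal pattern the comment implies:

// Sketch only: walk the (never-deallocated) lock report list and skip
// entries that don't currently say "blocked".  For blocked entries,
// filename/lineno record where that thread is waiting.
static void report_blocked_threads_sketch(void) {
  lockReport_t* rep;
  for (rep = lockReportHead; rep != NULL; rep = rep->next) {
    if (!rep->maybeLocked)
      continue;                 // leaked or idle entry; skip it
    // e.g. report rep->filename / rep->lineno here
  }
}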
Code example #12
File: tasks-fifo.c  Project: ct-clmsn/chapel
static inline
void taskCallBody(chpl_fn_p fp, void* arg, void* arg_copy,
                  c_sublocid_t subloc, chpl_bool serial_state,
                  int lineno, int32_t filename) {
  taskCallWrapperDesc_t* ptcwd;
  chpl_task_prvDataImpl_t private = {
    .prvdata = { .serial_state = serial_state } };

  ptcwd = (taskCallWrapperDesc_t*)
          chpl_mem_alloc(sizeof(*ptcwd),
                         CHPL_RT_MD_THREAD_PRV_DATA,
                         0, 0);
  *ptcwd = (taskCallWrapperDesc_t)
    { fp, arg, arg_copy, canCountRunningTasks, private };

  // begin critical section
  chpl_thread_mutexLock(&threading_lock);

  (void) add_to_task_pool(taskCallWrapper, ptcwd, true, ptcwd->chpl_data,
                          NULL, false, lineno, filename);

  // end critical section
  chpl_thread_mutexUnlock(&threading_lock);
}
Code example #13
File: tasks-fifo.c  Project: ct-clmsn/chapel
//
// When we create a thread it runs this wrapper function, which just
// executes tasks out of the pool as they become available.
//
static void
thread_begin(void* ptask_void) {
  task_pool_p ptask;
  thread_private_data_t *tp;

  tp = (thread_private_data_t*) chpl_mem_alloc(sizeof(thread_private_data_t),
                                               CHPL_RT_MD_THREAD_PRV_DATA,
                                               0, 0);
  chpl_thread_setPrivateData(tp);

  tp->lockRprt = NULL;
  if (blockreport)
    initializeLockReportForThread();

  while (true) {
    //
    // wait for a task to be present in the task pool
    //

    // In revision 22137, we investigated whether it was beneficial to
    // implement this while loop in a hybrid style, where depending on
    // the number of tasks available, idle threads would either yield or
    // wait on a condition variable to wake them.  Through analysis, we
    // realized this could potentially create a case where a thread would
    // become stranded, waiting for a condition signal that would never
    // come.  A potential solution to this was to keep a count of threads
    // that were waiting on the signal, but since there was a performance
    // impact from keeping it as a hybrid as opposed to merely yielding,
    // it was decided that we would return to the simple yield case.
    while (!task_pool_head) {
      if (set_block_loc(0, CHPL_FILE_IDX_IDLE_TASK)) {
        // all other tasks appear to be blocked
        struct timeval deadline, now;
        gettimeofday(&deadline, NULL);
        deadline.tv_sec += 1;
        do {
          chpl_thread_yield();
          if (!task_pool_head)
            gettimeofday(&now, NULL);
        } while (!task_pool_head
                 && (now.tv_sec < deadline.tv_sec
                     || (now.tv_sec == deadline.tv_sec
                         && now.tv_usec < deadline.tv_usec)));
        if (!task_pool_head) {
          check_for_deadlock();
        }
      }
      else {
        do {
          chpl_thread_yield();
        } while (!task_pool_head);
      }

      unset_block_loc();
    }
 
    //
    // Just now the pool had at least one task in it.  Lock and see if
    // there's something still there.
    //
    chpl_thread_mutexLock(&threading_lock);
    if (!task_pool_head) {
      chpl_thread_mutexUnlock(&threading_lock);
      continue;
    }

    //
    // We've found a task to run.
    //

    if (blockreport)
      progress_cnt++;

    //
    // start new task; increment running count and remove task from pool
    // also add the task to the task table (a structure in ChapelRuntime that keeps
    // track of currently running tasks for task-reports on deadlock or
    // Ctrl+C).
    //
    ptask = task_pool_head;
    idle_thread_cnt--;
    running_task_cnt++;

    dequeue_task(ptask);

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    tp->ptask = ptask;

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    (*ptask->fun)(ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_remove(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    tp->ptask = NULL;
    chpl_mem_free(ptask, 0, 0);

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    //
    // finished task; decrement running count and increment idle count
    //
    assert(running_task_cnt > 0);
    running_task_cnt--;
    idle_thread_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);
  }
}
Code example #14
File: tasks-fifo.c  Project: ct-clmsn/chapel
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  //
  // If we're serial, all the tasks have already been executed.
  //
  if (chpl_task_getSerial())
    return;

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->fun;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->id);
      chpldev_taskTable_set_active(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    (*task_to_run_fun)(child_ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->filename,
                           child_ptask->lineno,
                           child_ptask->id,
                           child_ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->id);
      chpldev_taskTable_remove(child_ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);

  }
}
Code example #15
File: tasks-fifo.c  Project: ct-clmsn/chapel
void chpl_sync_lock(chpl_sync_aux_t *s) {
  chpl_thread_mutexLock(&s->lock);
}
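
The matching unlock is presumably just as thin; a minimal sketch, assuming the conventional counterpart sits alongside it in the same file:

// Sketch (assumption): releasing a sync variable just unlocks its mutex.
void chpl_sync_unlock(chpl_sync_aux_t *s) {
  chpl_thread_mutexUnlock(&s->lock);
}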
Code example #16
File: tasks-fifo.c  Project: bollu/chapel
void chpl_task_executeTasksInList(void** p_task_list_void) {
  task_pool_p* p_task_list_head = (task_pool_p*) p_task_list_void;
  task_pool_p curr_ptask;
  task_pool_p child_ptask;

  // Note: this function needs to tolerate an empty task
  // list. That will happen for coforalls inside a serial block, say.

  curr_ptask = get_current_ptask();

  while (*p_task_list_head != NULL) {
    chpl_fn_p task_to_run_fun = NULL;

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    if ((child_ptask = *p_task_list_head) != NULL) {
      task_to_run_fun = child_ptask->bundle.requested_fn;
      dequeue_task(child_ptask);
    }

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    if (task_to_run_fun == NULL)
      continue;

    set_current_ptask(child_ptask);

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_suspended(curr_ptask->bundle.id);
      chpldev_taskTable_set_active(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    if (blockreport)
      initializeLockReportForThread();

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    (*task_to_run_fun)(&child_ptask->bundle);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           child_ptask->bundle.requested_fid,
                           child_ptask->bundle.filename,
                           child_ptask->bundle.lineno,
                           child_ptask->bundle.id,
                           child_ptask->bundle.is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(curr_ptask->bundle.id);
      chpldev_taskTable_remove(child_ptask->bundle.id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    // begin critical section
    chpl_thread_mutexLock(&extra_task_lock);

    extra_task_cnt--;

    // end critical section
    chpl_thread_mutexUnlock(&extra_task_lock);

    set_current_ptask(curr_ptask);
    chpl_mem_free(child_ptask, 0, 0);

  }
}
Code example #17
File: tasks-fifo.c  Project: jcazzie/chapel
// create a task from the given function pointer and arguments
// and append it to the end of the task pool
// assumes threading_lock has already been acquired!
static inline
task_pool_p add_to_task_pool(chpl_fn_int_t fid, chpl_fn_p fp,
                             chpl_task_bundle_t* a, size_t a_size,
                             chpl_bool serial_state,
                             chpl_bool countRunningTasks,
                             chpl_bool is_executeOn,
                             task_pool_p* p_task_list_head,
                             chpl_bool is_begin_stmt,
                             int lineno, int32_t filename) {


  size_t payload_size;
  task_pool_p ptask;
  chpl_task_prvDataImpl_t pv;

  memset(&pv, 0, sizeof(pv));

  assert(a_size >= sizeof(chpl_task_bundle_t));

  payload_size = a_size - sizeof(chpl_task_bundle_t);
  ptask = (task_pool_p) chpl_mem_alloc(sizeof(task_pool_t) + payload_size,
                                       CHPL_RT_MD_TASK_ARG_AND_POOL_DESC,
                                       lineno, filename);

  memcpy(&ptask->bundle, a, a_size);

  ptask->p_list_head            = NULL;
  ptask->list_next              = NULL;
  ptask->list_prev              = NULL;
  ptask->next                   = NULL;
  ptask->prev                   = NULL;
  ptask->chpl_data              = pv;
  ptask->bundle.serial_state    = serial_state;
  ptask->bundle.countRunning    = countRunningTasks;
  ptask->bundle.is_executeOn    = is_executeOn;
  ptask->bundle.lineno          = lineno;
  ptask->bundle.filename        = filename;
  ptask->bundle.requestedSubloc = c_sublocid_any_val;
  ptask->bundle.requested_fid   = fid;
  ptask->bundle.requested_fn    = fp;
  ptask->bundle.id              = get_next_task_id();

  enqueue_task(ptask, p_task_list_head);

  chpl_task_do_callbacks(chpl_task_cb_event_kind_create,
                         ptask->bundle.requested_fid,
                         ptask->bundle.filename,
                         ptask->bundle.lineno,
                         ptask->bundle.id,
                         ptask->bundle.is_executeOn);

  if (do_taskReport) {
    chpl_thread_mutexLock(&taskTable_lock);
    chpldev_taskTable_add(ptask->bundle.id,
                          ptask->bundle.lineno, ptask->bundle.filename,
                          (uint64_t) (intptr_t) ptask);
    chpl_thread_mutexUnlock(&taskTable_lock);
  }

  //
  // If we now have more tasks than threads to run them on (taking
  // into account that the current parent of a structured parallel
  // construct can run at least one of that construct's children),
  // try to start another thread.
  //
  if (queued_task_cnt > idle_thread_cnt &&
      (p_task_list_head == NULL || ptask->list_next != NULL || is_begin_stmt)) {
    maybe_add_thread();
  }

  return ptask;
}