Example #1
void
awaitEvent(rtsBool wait)
{
  do {
    /* Try to de-queue completed IO requests
     */
    workerWaitingForRequests = 1;
    awaitRequests(wait);
    workerWaitingForRequests = 0;

    // If a signal was raised, we need to service it
    // XXX the scheduler loop really should be calling
    // startSignalHandlers(), but this is the way that posix/Select.c
    // does it and I'm feeling too paranoid to refactor it today --SDM
    if (stg_pending_events != 0) {
        startSignalHandlers(&MainCapability);
        return;
    }

    // The return value from awaitRequests() is a red herring: ignore
    // it.  Return to the scheduler if !wait, or
    //
    //  - we were interrupted
    //  - the run-queue is now non-empty

  } while (wait
           && sched_state == SCHED_RUNNING
           && emptyRunQueue(&MainCapability)
      );
}
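A minimal sketch of how a caller might use this variant, assuming the scheduler only wants to block when it has nothing runnable; the wrapper function below is hypothetical and not taken from the example:

/* Hypothetical caller: block inside awaitEvent() only while the run
 * queue is empty; otherwise just poll for completed IO requests. */
static void
scheduleWaitForWork(void)
{
    awaitEvent( emptyRunQueue(&MainCapability) );
}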
Example #2
void
awaitEvent(rtsBool wait)
{
  int ret;

  do {
    /* Try to de-queue completed IO requests
     */
    workerWaitingForRequests = 1;
    ret = awaitRequests(wait);
    workerWaitingForRequests = 0;
    if (!ret) { 
      return; /* still hold the lock */
    }

    // Return to the scheduler if:
    //
    //  - we were interrupted
    //  - new threads have arrived

  } while (wait
           && sched_state == SCHED_RUNNING
           && emptyRunQueue(&MainCapability)
      );
}
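The loop-exit condition is the same in both variants above. A hedged sketch that pulls it out into a predicate, using only names that already appear in the examples; the helper itself is hypothetical:

/* Hypothetical helper capturing the shared loop condition: keep
 * waiting only if the caller asked us to block, the scheduler is
 * still running, and there is still nothing in the run queue. */
static rtsBool
shouldKeepWaiting(rtsBool wait)
{
    return wait
        && sched_state == SCHED_RUNNING
        && emptyRunQueue(&MainCapability);
}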
Example #3
StgClosure *
findSpark (Capability *cap)
{
  Capability *robbed;
  StgClosurePtr spark;
  rtsBool retry;
  nat i = 0;

  if (!emptyRunQueue(cap) || cap->returning_tasks_hd != NULL) {
      // If there are other threads, don't try to run any new
      // sparks: sparks might be speculative, we don't want to take
      // resources away from the main computation.
      return NULL;
  }

  do {
      retry = rtsFalse;

      // first try to get a spark from our own pool.
      // We should be using reclaimSpark(), because it works without
      // needing any atomic instructions:
      //   spark = reclaimSpark(cap->sparks);
      // However, measurements show that this makes at least one benchmark
      // slower (prsa) and doesn't affect the others.
      spark = tryStealSpark(cap->sparks);
      while (spark != NULL && fizzledSpark(spark)) {
          cap->spark_stats.fizzled++;
          traceEventSparkFizzle(cap);
          spark = tryStealSpark(cap->sparks);
      }
      if (spark != NULL) {
          cap->spark_stats.converted++;

          // Post event for running a spark from capability's own pool.
          traceEventSparkRun(cap);

          return spark;
      }
      if (!emptySparkPoolCap(cap)) {
          retry = rtsTrue;
      }

      if (n_capabilities == 1) { return NULL; } // no other capabilities to steal from

      debugTrace(DEBUG_sched,
                 "cap %d: Trying to steal work from other capabilities", 
                 cap->no);

      /* Visit capabilities 0..n-1 in sequence until a theft succeeds.
         We could start at a random place instead of 0 as well. */
      for ( i=0 ; i < n_capabilities ; i++ ) {
          robbed = capabilities[i];
          if (cap == robbed)  // ourselves...
              continue;

          if (emptySparkPoolCap(robbed)) // nothing to steal here
              continue;

          spark = tryStealSpark(robbed->sparks);
          while (spark != NULL && fizzledSpark(spark)) {
              cap->spark_stats.fizzled++;
              traceEventSparkFizzle(cap);
              spark = tryStealSpark(robbed->sparks);
          }
          if (spark == NULL && !emptySparkPoolCap(robbed)) {
              // we conflicted with another thread while trying to steal;
              // try again later.
              retry = rtsTrue;
          }

          if (spark != NULL) {
              cap->spark_stats.converted++;
              traceEventSparkSteal(cap, robbed->no);
              
              return spark;
          }
          // otherwise: no success, try next one
      }
  } while (retry);

  debugTrace(DEBUG_sched, "No sparks stolen");
  return NULL;
}
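All three examples rely on emptyRunQueue() as a cheap predicate (and this one on emptySparkPoolCap() as well). A sketch of the assumed shape of such a predicate; the field name and end-of-queue sentinel are assumptions for illustration, not taken from the examples:

/* Sketch only: the run queue is assumed to be a linked list of TSOs
 * hanging off the Capability, terminated by an END_TSO_QUEUE sentinel,
 * so "empty" reduces to a single pointer comparison. */
static rtsBool
emptyRunQueue (Capability *cap)
{
    return cap->run_queue_hd == END_TSO_QUEUE;
}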