void workerTaskStop (Task *task)
{
    DEBUG_ONLY( OSThreadId id );
    DEBUG_ONLY( id = osThreadId() );
    ASSERT(task->id == id);
    ASSERT(myTask() == task);

    ACQUIRE_LOCK(&all_tasks_mutex);

    // Unlink the Task from the doubly-linked all_tasks list.
    if (task->all_prev) {
        task->all_prev->all_next = task->all_next;
    } else {
        all_tasks = task->all_next;
    }
    if (task->all_next) {
        task->all_next->all_prev = task->all_prev;
    }

    currentWorkerCount--;

    RELEASE_LOCK(&all_tasks_mutex);

    freeTask(task);
}
void runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    for (w = list; w; w = w->link) {
        StgArrWords *farr;

        farr = (StgArrWords *)UNTAG_CLOSURE(w->cfinalizer);

        if ((StgClosure *)farr != &stg_NO_FINALIZER_closure)
            runCFinalizer((void *)farr->payload[0],
                          (void *)farr->payload[1],
                          (void *)farr->payload[2],
                          farr->payload[3]);
    }

    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }
}
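/* For reference, a sketch of the runCFinalizer() helper invoked above, as
 * it appears in older RTS versions. This is a reconstruction based on the
 * payload layout used by the caller (fn, ptr, env, flag), not necessarily
 * the exact source: the flag selects between a finalizer that takes an
 * environment pointer and one that takes only the object pointer. */
void runCFinalizer(void *fn, void *ptr, void *env, StgWord flag)
{
    if (flag)
        ((void (*)(void *, void *))fn)(env, ptr);
    else
        ((void (*)(void *))fn)(ptr);
}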
void sendMessage(Capability *from_cap, Capability *to_cap, Message *msg)
{
    ACQUIRE_LOCK(&to_cap->lock);

#ifdef DEBUG
    {
        const StgInfoTable *i = msg->header.info;
        if (i != &stg_MSG_THROWTO_info &&
            i != &stg_MSG_BLACKHOLE_info &&
            i != &stg_MSG_TRY_WAKEUP_info &&
            i != &stg_IND_info && // can happen if a MSG_BLACKHOLE is revoked
            i != &stg_WHITEHOLE_info) {
            barf("sendMessage: %p", i);
        }
    }
#endif

    msg->link = to_cap->inbox;
    to_cap->inbox = msg;

    recordClosureMutated(from_cap,(StgClosure*)msg);

    if (to_cap->running_task == NULL) {
        to_cap->running_task = myTask(); // precond for releaseCapability_()
        releaseCapability_(to_cap,rtsFalse);
    } else {
        interruptCapability(to_cap);
    }

    RELEASE_LOCK(&to_cap->lock);
}
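/* A minimal sketch of a sendMessage() caller, modeled on the RTS's
 * tryWakeupThread(): allocate a MSG_TRY_WAKEUP on the sending Capability
 * and deliver it to the Capability that owns the target thread. The
 * helper name sendTryWakeup is hypothetical, and the MessageWakeup
 * layout is an assumption that may vary across GHC versions. */
#if defined(THREADED_RTS)
static void sendTryWakeup (Capability *cap, StgTSO *tso)
{
    MessageWakeup *msg;

    // Allocate the message on the sending Capability's heap.
    msg = (MessageWakeup *)allocate(cap, sizeofW(MessageWakeup));
    SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
    msg->tso = tso;

    // Enqueue it in the inbox of the Capability owning the target TSO.
    sendMessage(cap, tso->cap, (Message *)msg);
}
#endif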
void runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    for (w = list; w; w = w->link) {
        // We need to filter out DEAD_WEAK objects, because it's not
        // guaranteed that the list will not have them when shutting down.
        // They only get filtered out during GC for the generation they
        // belong to. If there's no major GC between the time that the
        // finalizer for the object from the oldest generation is manually
        // called and shutdown, we end up running the same finalizer twice.
        // See #7170.
        if (w->header.info != &stg_DEAD_WEAK_info) {
            runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        }
    }

    if (task != NULL) {
        task->running_finalizers = false;
    }
}
/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it. For example,
 * see the integer-gmp package, which calls allocate() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 */
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
    return myTask()->cap;
#else
    return &MainCapability;
#endif
}
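/* A minimal sketch of the pattern described above, assuming a hypothetical
 * allocator callback invoked from inside an unsafe foreign call: it
 * recovers the current Capability rather than being passed one, then
 * allocates on the Haskell heap. Compare stgAllocForGMP() in integer-gmp
 * for the real instance of this pattern. */
static void * exampleAllocFromUnsafeCall (size_t size_in_bytes)
{
    Capability *cap = rts_unsafeGetMyCapability();

    // allocate() takes a size in words, so round the byte count up.
    return allocate(cap, ROUNDUP_BYTES_TO_WDS(size_in_bytes));
}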
/* Hack: we assume that we're building a batch-mode system unless
 * INTERPRETER is set
 */
#ifndef INTERPRETER /* Hack */
static void real_main(void)
{
    int exit_status;
    SchedulerStatus status;

    /* all GranSim/GUM init is done in startupHaskell; sets IAmMainThread! */
    startupHaskell(progargc,progargv,NULL);

    /* kick off the computation by creating the main thread with a pointer
       to mainIO_closure representing the computation of the overall program;
       then enter the scheduler with this thread and off we go;

       the same for GranSim (we have only one instance of this code)

       in a parallel setup, where we have many instances of this code
       running on different PEs, we should do this only for the main PE
       (IAmMainThread is set in startupHaskell)
    */

    /* ToDo: want to start with a larger stack size */
    {
        Capability *cap = rts_lock();
        cap = rts_evalLazyIO(cap,progmain_closure, NULL);
        status = rts_getSchedStatus(cap);
        taskTimeStamp(myTask());
        rts_unlock(cap);
    }

    /* check the status of the entire Haskell computation */
    switch (status) {
    case Killed:
        errorBelch("main thread exited (uncaught exception)");
        exit_status = EXIT_KILLED;
        break;
    case Interrupted:
        errorBelch("interrupted");
        exit_status = EXIT_INTERRUPTED;
        break;
    case HeapExhausted:
        exit_status = EXIT_HEAPOVERFLOW;
        break;
    case Success:
        exit_status = EXIT_SUCCESS;
        break;
    default:
        barf("main thread completed with invalid status");
    }
    shutdownHaskellAndExit(exit_status);
}
static Task * allocTask (void)
{
    Task *task;

    task = myTask();
    if (task != NULL) {
        // This OS thread already has a Task; reuse it.
        return task;
    } else {
        task = newTask(rtsFalse);
#if defined(THREADED_RTS)
        task->id = osThreadId();
#endif
        // Stash the new Task in thread-local storage so that
        // subsequent calls to myTask() find it.
        setMyTask(task);
        return task;
    }
}
void runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    for (w = list; w; w = w->link) {
        runCFinalizers((StgCFinalizerList *)w->cfinalizers);
    }

    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }
}
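/* A sketch of how runCFinalizers() plausibly walks the StgCFinalizerList
 * consumed above; the field names (link, fn, ptr, eptr, flag) are
 * assumptions based on the list representation in the RTS headers. Each
 * node carries one C finalizer, and the list is terminated by
 * stg_NO_FINALIZER_closure. */
void runCFinalizers(StgCFinalizerList *list)
{
    StgCFinalizerList *head;

    for (head = list;
         (StgClosure *)head != &stg_NO_FINALIZER_closure;
         head = (StgCFinalizerList *)head->link)
    {
        // flag distinguishes finalizers with an environment pointer
        // from those taking only the object pointer.
        if (head->flag)
            ((void (*)(void *, void *))head->fn)(head->eptr, head->ptr);
        else
            ((void (*)(void *))head->fn)(head->ptr);
    }
}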
void boundTaskExiting (Task *task)
{
#if defined(THREADED_RTS)
    ASSERT(osThreadId() == task->id);
#endif
    ASSERT(myTask() == task);

    endInCall(task);

    // Set task->stopped, but only if this is the last call (#4850).
    // Remember that we might have a worker Task that makes a foreign
    // call and then a callback, so it can transform into a bound
    // Task for the duration of the callback.
    if (task->incall == NULL) {
        task->stopped = rtsTrue;
    }

    debugTrace(DEBUG_sched, "task exiting");
}
void freeMyTask (void)
{
    Task *task;

    task = myTask();

    if (task == NULL) return;

    if (!task->stopped) {
        errorBelch(
            "freeMyTask() called, but the Task is not stopped; ignoring");
        return;
    }

    if (task->worker) {
        errorBelch("freeMyTask() called on a worker; ignoring");
        return;
    }

    ACQUIRE_LOCK(&all_tasks_mutex);

    if (task->all_prev) {
        task->all_prev->all_next = task->all_next;
    } else {
        all_tasks = task->all_next;
    }
    if (task->all_next) {
        task->all_next->all_prev = task->all_prev;
    }

    taskCount--;

    RELEASE_LOCK(&all_tasks_mutex);

    freeTask(task);
    setMyTask(NULL);
}
void scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    nat n, i;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    // count number of finalizers, and kill all the weak pointers first...
    n = 0;
    for (w = list; w; w = w->link) {
        StgArrWords *farr;

        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        farr = (StgArrWords *)UNTAG_CLOSURE(w->cfinalizer);

        if ((StgClosure *)farr != &stg_NO_FINALIZER_closure)
            runCFinalizer((void *)farr->payload[0],
                          (void *)farr->payload[1],
                          (void *)farr->payload[2],
                          farr->payload[3]);

#ifdef PROFILING
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either. See stg_DEAD_WEAK_info
        // in StgMiscClosures.hc.
#endif
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }

    // No finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                           rts_apply(cap,
                               (StgClosure *)runFinalizerBatch_closure,
                               rts_mkInt(cap,n)),
                           (StgClosure *)arr)
        );

    scheduleThread(cap,t);
}