/* Apply the Haskell IO action referenced by the_stableptr to an Int32 and a
 * Ptr argument and run it.  The RTS is locked for the duration of the call. */
void ZZookeeperziCore_d2Ei(StgStablePtr the_stableptr, HsInt32 a1, HsPtr a2)
{
    Capability *cap;
    HaskellObj ret;

    cap = rts_lock();
    cap = rts_evalIO(cap,
                     rts_apply(cap, (HaskellObj)runIO_closure,
                               rts_apply(cap,
                                         rts_apply(cap,
                                                   (StgClosure *)deRefStablePtr(the_stableptr),
                                                   rts_mkInt32(cap, a1)),
                                         rts_mkPtr(cap, a2))),
                     &ret);
    rts_checkSchedStatus("ZZookeeperziCore_d2Ei", cap);
    rts_unlock(cap);
}
/* Evaluate the pure Haskell function bound to
 * HaskellConvert_zdfconvertzuhszuawO_closure on two floats and return the
 * result to the C caller.  The RTS is locked for the duration of the call. */
HsFloat convert_hs(HsFloat a1, HsFloat a2)
{
    Capability *cap;
    HaskellObj ret;
    HsFloat cret;

    cap = rts_lock();
    cap = rts_evalIO(cap,
                     rts_apply(cap, (HaskellObj)runNonIO_closure,
                               rts_apply(cap,
                                         rts_apply(cap,
                                                   &HaskellConvert_zdfconvertzuhszuawO_closure,
                                                   rts_mkFloat(cap, a1)),
                                         rts_mkFloat(cap, a2))),
                     &ret);
    rts_checkSchedStatus("convert_hs", cap);
    cret = rts_getFloat(ret);
    rts_unlock(cap);
    return cret;
}
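/*
 * A minimal sketch of how a stub like convert_hs is driven from plain C:
 * the host program must bring the GHC RTS up with hs_init() before the
 * first call and tear it down with hs_exit() afterwards.  This caller is
 * illustrative only; it assumes the Haskell module was compiled with a
 * matching `foreign export` and that the program links against the RTS.
 */
#include <stdio.h>
#include "HsFFI.h"

extern HsFloat convert_hs(HsFloat a1, HsFloat a2);

int main(int argc, char *argv[])
{
    hs_init(&argc, &argv);                      /* start the Haskell runtime */
    printf("%f\n", (double)convert_hs(1.5f, 2.5f));
    hs_exit();                                  /* shut it down again */
    return 0;
}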
/*
 * Function: startSignalHandlers()
 *
 * Run the handlers associated with the stacked up console events. Console
 * event delivery is blocked for the duration of this call.
 */
void startSignalHandlers(Capability *cap)
{
    StgStablePtr handler;

    if (console_handler < 0) {
        return;
    }

    blockUserSignals();
    ACQUIRE_LOCK(&sched_mutex);

    handler = deRefStablePtr((StgStablePtr)console_handler);
    while (stg_pending_events > 0) {
        stg_pending_events--;
        scheduleThread(cap,
            createIOThread(cap,
                RtsFlags.GcFlags.initialStkSize,
                rts_apply(cap,
                          (StgClosure *)handler,
                          rts_mkInt(cap, stg_pending_buf[stg_pending_events]))));
    }

    RELEASE_LOCK(&sched_mutex);
    unblockUserSignals();
}
/* Run the Haskell IO action HsSdlTest_zdfmyzumainzua1OV_closure from C.
 * The RTS is locked for the duration of the call. */
void my_main(void)
{
    Capability *cap;
    HaskellObj ret;

    cap = rts_lock();
    cap = rts_evalIO(cap,
                     rts_apply(cap, (HaskellObj)runIO_closure,
                               &HsSdlTest_zdfmyzumainzua1OV_closure),
                     &ret);
    rts_checkSchedStatus("my_main", cap);
    rts_unlock(cap);
}
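/*
 * Hypothetical sketch of calling the exported action from inside a larger C
 * application rather than from main().  It assumes hs_init() has already
 * been run somewhere in the host program's startup path; the stub itself
 * takes care of locking and releasing a Capability around the call.
 */
#include "HsFFI.h"

extern void my_main(void);

void run_haskell_side(void)
{
    my_main();   /* evaluates the Haskell IO action on the calling thread */
}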
static void
gtk2hs_closure_marshal(GClosure *closure,
                       GValue *return_value,
                       guint n_param_values,
                       const GValue *param_values,
                       gpointer invocation_hint,
                       gpointer marshal_data)
{
    Gtk2HsClosure *hc = (Gtk2HsClosure *)closure;
    HaskellObj call, ret;
#ifdef GHC_RTS_USES_CAPABILITY
    Capability *cap;
#else
    SchedulerStatus cap;
#endif
    guint i;

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to run callback, n_param_values=%d",
                       hc->callback, n_param_values));

#ifdef GHC_RTS_USES_CAPABILITY
    cap = rts_lock();
#else
    rts_lock();
#endif

    call = (StgClosure *)deRefStablePtr(hc->callback);

    /* construct the function call */
    for (i = 0; i < n_param_values; i++) {
        WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): param_values[%d]=%s :: %s",
                           hc->callback, i,
                           g_strdup_value_contents(&param_values[i]),
                           g_type_name(G_VALUE_TYPE(&param_values[i]))));
        call = rts_apply(CAP call, gtk2hs_value_as_haskellobj(CAP &param_values[i]));
    }

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to rts_evalIO", hc->callback));

    /* perform the call */
#if __GLASGOW_HASKELL__ >= 704
    rts_evalIO(&cap, rts_apply(CAP (HaskellObj)runIO_closure, call), &ret);
#else
    cap = rts_evalIO(CAP rts_apply(CAP (HaskellObj)runIO_closure, call), &ret);
#endif

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to rts_checkSchedStatus", hc->callback));

    /* barf if anything went wrong */
    /* TODO: pass a sensible value for call site so we get better error messages */
    /* or perhaps we can propagate any error? */
    rts_checkSchedStatus("gtk2hs_closure_marshal", cap);

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): ret=%p", hc->callback, ret));

    if (return_value) {
        WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): return_value :: %s, ret=%p, UNTAG_CLOSURE(ret)=%p",
                           hc->callback,
                           /* g_strdup_value_contents(return_value), */
                           g_type_name(G_VALUE_TYPE(return_value)),
                           ret, UNTAG_CLOSURE(ret)));
        gtk2hs_value_from_haskellobj(return_value, ret);
    }

#ifdef GHC_RTS_USES_CAPABILITY
    rts_unlock(cap);
#else
    rts_unlock();
#endif

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): done running callback", hc->callback));
}
void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    nat n, i;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    // count number of finalizers, and kill all the weak pointers first...
    n = 0;
    for (w = list; w; w = w->link) {
        StgArrWords *farr;

        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        farr = (StgArrWords *)UNTAG_CLOSURE(w->cfinalizer);

        if ((StgClosure *)farr != &stg_NO_FINALIZER_closure)
            runCFinalizer((void *)farr->payload[0],
                          (void *)farr->payload[1],
                          (void *)farr->payload[2],
                          farr->payload[3]);

#ifdef PROFILING
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either.  See stg_DEAD_WEAK_info
        // in StgMiscClosures.hc.
#endif
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }

    // No finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                                 rts_apply(cap,
                                           (StgClosure *)runFinalizerBatch_closure,
                                           rts_mkInt(cap, n)),
                                 (StgClosure *)arr));
    scheduleThread(cap, t);
}