/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    /* NOTE(review): the "Thread %d run %d" placeholders carry no explicit
     * arguments here; presumably the GCDEBUG_LOG macro supplies the thread id
     * and GC sequence number itself - confirm against the macro definition. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from interrupt\n");

    MVM_telemetry_timestamp(tc, "gc_enter_from_interrupt");

    /* If profiling, record that GC is starting. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_start(tc, is_full_collection(tc));

    /* We'll certainly take care of our own work. */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* Indicate that we're ready to GC. Only want to decrement it if it's 2 or
     * greater (0 should never happen; 1 means the coordinator is still counting
     * up how many threads will join in, so we should wait until it decides to
     * decrement.) */
    while ((curr = MVM_load(&tc->instance->gc_start)) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* Busy-wait; the yield call is deliberately commented out here. */
        /* MVM_platform_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect: the coordinator
     * drops gc_start to zero once every participant has checked in. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for other threads\n");
    while (MVM_load(&tc->instance->gc_start)) {
        /* MVM_platform_thread_yield();*/
    }

    /* Do our share of the collection; as a non-coordinator we are not
     * responsible for instance-wide roots (MVMGCWhatToDo_NoInstance). */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entering run_gc\n");
    run_gc(tc, MVMGCWhatToDo_NoInstance);
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete\n");

    /* If profiling, record that GC is over. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_end(tc);
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC.
 *
 * The thread that wins the CAS on gc_start becomes the coordinator: it bumps
 * the GC sequence number, signals (or steals work from) every other thread,
 * sets up the finish/ack vote counters, frees STables queued in the previous
 * collection, and then releases everyone into run_gc. A thread that loses the
 * race simply behaves as if it had been interrupted at a safe point. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n");

    MVM_telemetry_timestamp(tc, "gc_enter_from_allocator");

    /* Try to start the GC run; winning this CAS elects us coordinator. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;

        /* Need to wait for other threads to reset their gc_status: a non-zero
         * gc_ack means the previous run's acknowledgement phase is still in
         * progress. */
        while (MVM_load(&tc->instance->gc_ack)) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : waiting for other thread's gc_ack\n");
            MVM_platform_thread_yield();
        }

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        MVM_incr(&tc->instance->gc_seq_number);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            (int)MVM_load(&tc->instance->gc_seq_number));

        /* Decide if it will be a full collection. */
        tc->instance->gc_full_collect = is_full_collection(tc);

        MVM_telemetry_timestamp(tc, "won the gc starting race");

        /* If profiling, record that GC is starting. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_start(tc, tc->instance->gc_full_collect);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* Flag that we didn't agree on this run that all the in-trays are
         * cleared (a responsibility of the co-ordinator). */
        MVM_store(&tc->instance->gc_intrays_clearing, 1);

        /* We'll take care of our own work. */
        add_work(tc, tc);

        /* Find other threads, and signal or steal. We loop because new threads
         * may appear on the instance's thread list while we are signalling;
         * each pass atomically detaches the current list head and signals any
         * threads we have not already seen. */
        do {
            MVMThread *threads = (MVMThread *)MVM_load(&tc->instance->threads);
            if (threads && threads != last_starter) {
                MVMThread *head = threads;
                MVMuint32 add;
                /* Detach the list by swapping the head for NULL; retry until
                 * the CAS observes the head we loaded. */
                while ((threads = (MVMThread *)MVM_casptr(&tc->instance->threads, head, NULL)) != head) {
                    head = threads;
                }

                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }

            /* If there's an event loop thread, wake it up to participate. */
            if (tc->instance->event_loop_wakeup)
                uv_async_send(tc->instance->event_loop_wakeup);
        } while (MVM_load(&tc->instance->gc_start) > 1);

        /* Sanity checks: re-attach the detached thread list (it must still be
         * NULL), and the finish vote counter must be clear from the last run. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (MVM_load(&tc->instance->gc_finish) != 0)
            MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_finish));

        /* gc_ack gets an extra so the final acknowledger
         * can also free the STables. */
        MVM_store(&tc->instance->gc_finish, num_threads + 1);
        MVM_store(&tc->instance->gc_ack, num_threads + 2);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : finish votes is %d\n",
            (int)MVM_load(&tc->instance->gc_finish));

        /* Now we're ready to start, zero promoted since last full collection
         * counter if this is a full collect. */
        if (tc->instance->gc_full_collect)
            MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0);

        /* This is a safe point for us to free any STables that have been marked
         * for deletion in the previous collection (since we let finalization -
         * which appends to this list - happen after we set threads on their
         * way again, it's not safe to do it in the previous collection). */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Freeing STables if needed\n");
        MVM_gc_collect_free_stables(tc);

        /* Signal to the rest to start: our own decrement takes gc_start from
         * 1 to 0, releasing the threads spinning on it. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator signalling start\n");
        if (MVM_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n", MVM_load(&tc->instance->gc_start));

        /* Start collecting; as coordinator we also handle instance-wide roots. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator entering run_gc\n");
        run_gc(tc, MVMGCWhatToDo_All);

        /* If profiling, record that GC is over. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_end(tc);

        MVM_telemetry_timestamp(tc, "gc finished");

        /* Fixed misspelling "cooridnator" in the debug message. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete (coordinator)\n");
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
/* Enters the work loop. */ static void worker(MVMThreadContext *tc, MVMCallsite *callsite, MVMRegister *args) { MVMObject *updated_static_frames = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray); MVMObject *previous_static_frames = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray); tc->instance->speshworker_thread_id = tc->thread_obj->body.thread_id; MVMROOT2(tc, updated_static_frames, previous_static_frames, { while (1) { MVMObject *log_obj; MVMuint64 start_time; unsigned int interval_id; if (MVM_spesh_debug_enabled(tc)) start_time = uv_hrtime(); log_obj = MVM_repr_shift_o(tc, tc->instance->spesh_queue); if (MVM_spesh_debug_enabled(tc)) { MVM_spesh_debug_printf(tc, "Received Logs\n" "=============\n\n" "Was waiting %dus for logs on the log queue.\n\n", (int)((uv_hrtime() - start_time) / 1000)); } if (tc->instance->main_thread->prof_data) MVM_profiler_log_spesh_start(tc); interval_id = MVM_telemetry_interval_start(tc, "spesh worker consuming a log"); uv_mutex_lock(&(tc->instance->mutex_spesh_sync)); tc->instance->spesh_working = 1; uv_mutex_unlock(&(tc->instance->mutex_spesh_sync)); tc->instance->spesh_stats_version++; if (log_obj->st->REPR->ID == MVM_REPR_ID_MVMSpeshLog) { MVMSpeshLog *sl = (MVMSpeshLog *)log_obj; MVM_telemetry_interval_annotate((uintptr_t)sl->body.thread->body.tc, interval_id, "from this thread"); MVMROOT(tc, sl, { MVMThreadContext *stc; MVMuint32 i; MVMuint32 n; /* Update stats, and if we're logging dump each of them. 
*/ tc->instance->spesh_stats_version++; if (MVM_spesh_debug_enabled(tc)) start_time = uv_hrtime(); MVM_spesh_stats_update(tc, sl, updated_static_frames); n = MVM_repr_elems(tc, updated_static_frames); if (MVM_spesh_debug_enabled(tc)) { MVM_spesh_debug_printf(tc, "Statistics Updated\n" "==================\n" "%d frames had their statistics updated in %dus.\n\n", (int)n, (int)((uv_hrtime() - start_time) / 1000)); for (i = 0; i < n; i++) { char *dump = MVM_spesh_dump_stats(tc, (MVMStaticFrame* ) MVM_repr_at_pos_o(tc, updated_static_frames, i)); MVM_spesh_debug_printf(tc, "%s==========\n\n", dump); MVM_free(dump); } } MVM_telemetry_interval_annotate((uintptr_t)n, interval_id, "stats for this many frames"); GC_SYNC_POINT(tc); /* Form a specialization plan. */ if (MVM_spesh_debug_enabled(tc)) start_time = uv_hrtime(); tc->instance->spesh_plan = MVM_spesh_plan(tc, updated_static_frames); if (MVM_spesh_debug_enabled(tc)) { n = tc->instance->spesh_plan->num_planned; MVM_spesh_debug_printf(tc, "Specialization Plan\n" "===================\n" "%u specialization(s) will be produced (planned in %dus).\n\n", n, (int)((uv_hrtime() - start_time) / 1000)); for (i = 0; i < n; i++) { char *dump = MVM_spesh_dump_planned(tc, &(tc->instance->spesh_plan->planned[i])); MVM_spesh_debug_printf(tc, "%s==========\n\n", dump); MVM_free(dump); } } MVM_telemetry_interval_annotate((uintptr_t)tc->instance->spesh_plan->num_planned, interval_id, "this many specializations planned"); GC_SYNC_POINT(tc); /* Implement the plan and then discard it. */ n = tc->instance->spesh_plan->num_planned; for (i = 0; i < n; i++) { MVM_spesh_candidate_add(tc, &(tc->instance->spesh_plan->planned[i])); GC_SYNC_POINT(tc); } MVM_spesh_plan_destroy(tc, tc->instance->spesh_plan); tc->instance->spesh_plan = NULL; /* Clear up stats that didn't get updated for a while, * then add frames updated this time into the previously * updated array. 
*/ MVM_spesh_stats_cleanup(tc, previous_static_frames); n = MVM_repr_elems(tc, updated_static_frames); for (i = 0; i < n; i++) MVM_repr_push_o(tc, previous_static_frames, MVM_repr_at_pos_o(tc, updated_static_frames, i)); /* Clear updated static frames array. */ MVM_repr_pos_set_elems(tc, updated_static_frames, 0); /* Allow the sending thread to produce more logs again, * putting a new spesh log in place if needed. */ stc = sl->body.thread->body.tc; if (stc && !sl->body.was_compunit_bumped) if (MVM_incr(&(stc->spesh_log_quota)) == 0) { stc->spesh_log = MVM_spesh_log_create(tc, sl->body.thread); MVM_telemetry_timestamp(stc, "logging restored after quota had run out"); } /* If needed, signal sending thread that it can continue. */ if (sl->body.block_mutex) { uv_mutex_lock(sl->body.block_mutex); MVM_store(&(sl->body.completed), 1); uv_cond_signal(sl->body.block_condvar); uv_mutex_unlock(sl->body.block_mutex); } { MVMSpeshLogEntry *entries = sl->body.entries; sl->body.entries = NULL; MVM_free(entries); } }); } else if (MVM_is_null(tc, log_obj)) {