/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    /* Reset our stolen-work list and queue up our own thread's work.
     * (Removed an unused local `decr` that was never read.) */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* grab our child */
    signal_child(tc);

    /* Count us in to the GC run. Wait for a vote to steal: only decrement
     * gc_start once it has reached 2 or more (a value of 1 means the
     * coordinator is still counting threads in, so spin until then). */
    GCORCH_LOG(tc, "Thread %d run %d : Entered from interrupt\n");
    while ((curr = tc->instance->gc_start) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* apr_sleep(1);
        apr_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect. */
    while (tc->instance->gc_start) {
        /* apr_sleep(1);
        apr_thread_yield();*/
    }

    /* Collect, but leave instance-wide structures alone; those are the
     * coordinator's responsibility. */
    run_gc(tc, MVMGCWhatToDo_NoInstance);
}
/* Save dead STable pointers to delete later.. */
static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) {
    /* Lock-free push onto the instance-wide list of STables awaiting freeing:
     * keep retrying until our CAS observes an unchanged list head. */
    MVMSTable *current_head;
    for (;;) {
        current_head = tc->instance->stables_to_free;
        /* Reuse the forwarder slot as the intrusive "next" link. */
        st->header.forwarder = (MVMCollectable *)current_head;
        if (MVM_trycas(&tc->instance->stables_to_free, current_head, st))
            break;
    }
}
/* Save dead STable pointers to delete later.. */
static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) {
    MVMSTable *current_head;
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
    /* An STable with an allocated overflow serialization index must never be
     * queued here: the sc_forward_u union member is about to be reused as the
     * list link below. */
    assert(!(st->header.flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED));
#endif
    /* Lock-free push onto the instance-wide free list; retry the CAS until
     * the head we read is still the head when we swap. */
    for (;;) {
        current_head = tc->instance->stables_to_free;
        st->header.sc_forward_u.st = current_head;
        if (MVM_trycas(&tc->instance->stables_to_free, current_head, st))
            break;
    }
}
/* Steal a freshly-spawned child thread, if we still hold one, into the GC
 * run, then clear the new_child slot. */
static void signal_child(MVMThreadContext *tc) {
    MVMThread *child = tc->thread_obj->body.new_child;
    /* if we still have it, its state will be UNABLE, so steal it. */
    if (child) {
        /* this will never return nonzero, because the child's status
         * will always be UNABLE or STOLEN. */
        signal_one_thread(tc, child->body.tc);
        /* Null out the new_child slot; retry until the CAS sticks. */
        for (;;) {
            if (MVM_trycas(&tc->thread_obj->body.new_child,
                    tc->thread_obj->body.new_child, NULL))
                break;
        }
    }
}
/* Return/unwind do about the same thing; this factors it out.
 * Returns 1 if control switched to a caller frame, 0 if this was the last
 * frame (no caller, or the thread's entry frame). When `unwind` is non-zero,
 * any special-return hook on the caller is skipped. */
static MVMuint64 return_or_unwind(MVMThreadContext *tc, MVMuint8 unwind) {
    MVMFrame *returner = tc->cur_frame;
    MVMFrame *caller = returner->caller;
    MVMFrame *prior;

    /* Decrement the frame reference of the prior invocation, and then
     * set us as it. */
    do {
        prior = returner->static_info->body.prior_invocation;
    } while (!MVM_trycas(&returner->static_info->body.prior_invocation, prior, returner));
    if (prior)
        MVM_frame_dec_ref(tc, prior);

    /* Clear up argument processing leftovers, if any. */
    if (returner->work) {
        MVM_args_proc_cleanup_for_cache(tc, &returner->params);
    }

    /* signal to the GC to ignore ->work */
    returner->tc = NULL;

    /* Switch back to the caller frame if there is one; we also need to
     * decrement its reference count. */
    if (caller && returner != tc->thread_entry_frame) {
        /* Restore the interpreter's execution state to resume the caller:
         * op pointer, bytecode base, register base and compilation unit. */
        tc->cur_frame = caller;
        *(tc->interp_cur_op) = caller->return_address;
        *(tc->interp_bytecode_start) = caller->static_info->body.bytecode;
        *(tc->interp_reg_base) = caller->work;
        *(tc->interp_cu) = caller->static_info->body.cu;
        MVM_frame_dec_ref(tc, caller);
        returner->caller = NULL;

        /* Handle any special return hooks. The hook is cleared before it is
         * invoked, so it fires at most once; on unwind it is cleared but
         * deliberately not called. */
        if (caller->special_return) {
            MVMSpecialReturn sr = caller->special_return;
            caller->special_return = NULL;
            if (!unwind)
                sr(tc, caller->special_return_data);
        }

        return 1;
    }
    else {
        /* No caller (or we hit the thread entry frame): nothing to resume. */
        tc->cur_frame = NULL;
        return 0;
    }
}
/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from interrupt\n");

    MVM_telemetry_timestamp(tc, "gc_enter_from_interrupt");

    /* If profiling, record that GC is starting. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_start(tc, is_full_collection(tc));

    /* We'll certainly take care of our own work. */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* Indicate that we're ready to GC. Only want to decrement it if it's 2 or
     * greater (0 should never happen; 1 means the coordinator is still counting
     * up how many threads will join in, so we should wait until it decides to
     * decrement.) The CAS may also fail under contention; spin until we
     * successfully cast our vote. */
    while ((curr = MVM_load(&tc->instance->gc_start)) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* MVM_platform_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect. The coordinator
     * performs the final decrement of gc_start to zero as the start signal. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for other threads\n");
    while (MVM_load(&tc->instance->gc_start)) {
        /* MVM_platform_thread_yield();*/
    }

    /* Collect, but leave instance-wide structures to the coordinator. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entering run_gc\n");
    run_gc(tc, MVMGCWhatToDo_NoInstance);
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete\n");

    /* If profiling, record that GC is over. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_end(tc);
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. The 0 -> 1 CAS on gc_start elects exactly one
     * coordinator; all losers enlist via MVM_gc_enter_from_interrupt below. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;
        MVMuint32 is_full;

        /* Need to wait for other threads to reset their gc_status. */
        while (MVM_load(&tc->instance->gc_ack)) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : waiting for other thread's gc_ack\n");
            MVM_platform_thread_yield();
        }

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        MVM_incr(&tc->instance->gc_seq_number);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            (int)MVM_load(&tc->instance->gc_seq_number));

        /* Decide if it will be a full collection. */
        is_full = is_full_collection(tc);

        /* If profiling, record that GC is starting. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_start(tc, is_full);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* Flag that we didn't agree on this run that all the in-trays are
         * cleared (a responsibility of the co-ordinator. */
        MVM_store(&tc->instance->gc_intrays_clearing, 1);

        /* We'll take care of our own work. */
        add_work(tc, tc);

        /* Find other threads, and signal or steal. Loop until every signalled
         * thread has voted gc_start down to 1 (only our own vote remains). */
        do {
            MVMThread *threads = (MVMThread *)MVM_load(&tc->instance->threads);
            if (threads && threads != last_starter) {
                MVMThread *head = threads;
                MVMuint32 add;
                /* Steal the public threads list by swapping NULL in for it;
                 * retry if the head moved under us. */
                while ((threads = (MVMThread *)MVM_casptr(&tc->instance->threads, head, NULL)) != head) {
                    head = threads;
                }
                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }

            /* If there's an event loop thread, wake it up to participate. */
            if (tc->instance->event_loop_wakeup)
                uv_async_send(tc->instance->event_loop_wakeup);
        } while (MVM_load(&tc->instance->gc_start) > 1);

        /* Sanity checks. Restore the stolen threads list, and make sure no
         * stale finish votes are left over from a previous run. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (MVM_load(&tc->instance->gc_finish) != 0)
            MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_finish));

        /* gc_ack gets an extra so the final acknowledger
         * can also free the STables. */
        MVM_store(&tc->instance->gc_finish, num_threads + 1);
        MVM_store(&tc->instance->gc_ack, num_threads + 2);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : finish votes is %d\n",
            (int)MVM_load(&tc->instance->gc_finish));

        /* Now we're ready to start, zero promoted since last full collection
         * counter if this is a full collect. */
        if (is_full)
            MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0);

        /* Signal to the rest to start: decrementing our own gc_start vote to
         * zero releases the threads spinning in enter_from_interrupt. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator signalling start\n");
        if (MVM_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_start));

        /* Start collecting. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator entering run_gc\n");
        run_gc(tc, MVMGCWhatToDo_All);

        /* Free any STables that have been marked for deletion. It's okay for
         * us to muck around in another thread's fromspace while it's mutating
         * tospace, really. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Freeing STables if needed\n");
        MVM_gc_collect_free_stables(tc);

        /* If profiling, record that GC is over. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_end(tc);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete (cooridnator)\n");
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCORCH_LOG(tc, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. The 0 -> 1 CAS elects exactly one coordinator;
     * losers enlist as ordinary participants below. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        tc->instance->gc_seq_number++;
        GCORCH_LOG(tc, "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            tc->instance->gc_seq_number);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* need to wait for other threads to reset their gc_status. */
        while (tc->instance->gc_ack)
            apr_thread_yield();

        add_work(tc, tc);

        /* grab our child */
        signal_child(tc);

        /* Find other threads, and signal or steal; loop until all signalled
         * threads have voted gc_start down to 1 (only our vote remains). */
        do {
            if (tc->instance->threads && tc->instance->threads != last_starter) {
                MVMThread *head;
                MVMuint32 add;
                /* Steal the public threads list by swapping NULL in for it. */
                while (!MVM_trycas(&tc->instance->threads, (head = tc->instance->threads), NULL));
                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCORCH_LOG(tc, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_atomic_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }
        } while (tc->instance->gc_start > 1);

        /* Sanity checks: restore the threads list, and make sure no stale
         * finish votes are left over from a previous run. Cast AO_t values to
         * int to match the %d format specifier. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (tc->instance->gc_finish != 0)
            MVM_panic(MVM_exitcode_gcorch, "finish votes was %d\n",
                (int)tc->instance->gc_finish);

        tc->instance->gc_ack = tc->instance->gc_finish = num_threads + 1;
        GCORCH_LOG(tc, "Thread %d run %d : finish votes is %d\n", (int)tc->instance->gc_finish);

        /* signal to the rest to start. Bug fix: the panic previously reported
         * gc_finish, but gc_start is the counter actually being checked. */
        if (MVM_atomic_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "start votes was %d\n",
                (int)tc->instance->gc_start);

        run_gc(tc, MVMGCWhatToDo_All);
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCORCH_LOG(tc, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}