/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run.
 *
 * Fix over previous revision: removed the unused local `MVMuint8 decr`, which
 * was declared and initialized but never read. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    /* Empty our local work list, then register ourselves as work for this
     * GC run. */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* If we have a freshly spawned child thread, steal it into this run
     * before voting (see signal_child). */
    signal_child(tc);

    /* Count us in to the GC run by decrementing gc_start. We may only vote
     * once gc_start >= 2 (the coordinator holds one vote of its own), and we
     * busy-wait until our CAS decrement succeeds. */
    GCORCH_LOG(tc, "Thread %d run %d : Entered from interrupt\n");
    while ((curr = tc->instance->gc_start) < 2
           || !MVM_cas(&tc->instance->gc_start, curr, curr - 1)) {
        /* Busy-wait; a yield/backoff here would be kinder to the CPU. */
    }

    /* Wait for all threads to indicate readiness to collect (gc_start
     * reaching zero is the coordinator's "go" signal). */
    while (tc->instance->gc_start) {
        /* Busy-wait, as above. */
    }

    /* We are not the coordinator, so we do not process instance-wide roots. */
    run_gc(tc, MVMGCWhatToDo_NoInstance);
}
/* Called by a thread to indicate it has completed a blocking operation and is
 * thus able to participate in a GC run again. Note that this case needs some
 * special handling if it comes out of this mode when a GC run is taking place. */
void MVM_gc_mark_thread_unblocked(MVMThreadContext *tc) {
    /* Try to set it from unable to running. The CAS fails if our status is
     * not UNABLE — e.g. a coordinator stole our work and set us to STOLEN
     * while we were blocked. */
    while (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE, MVMGCStatus_NONE) != MVMGCStatus_UNABLE) {
        /* We can't, presumably because a GC run is going on. We should wait
         * for that to finish before we go on, but without chewing CPU. */
        MVM_platform_thread_yield();
    }
}
/* If this thread has a freshly spawned child thread recorded in new_child,
 * steal it into our GC run and then clear the new_child pointer. */
static void signal_child(MVMThreadContext *tc) {
    MVMThread *child = tc->thread_obj->body.new_child;
    /* if we still have it, its state will be UNABLE, so steal it. */
    if (child) {
        /* this will never return nonzero, because the child's status
         * will always be UNABLE or STOLEN. */
        signal_one_thread(tc, child->body.tc);
        /* NOTE(review): new_child is re-read inside the CAS loop, so the
         * expected value can change between the read and the swap; presumably
         * only this thread (and a stealing parent) ever writes it — confirm
         * that invariant holds before restructuring. */
        while (!MVM_cas(&tc->thread_obj->body.new_child,
                tc->thread_obj->body.new_child, NULL));
    }
}
/* Notifies a single thread (other than the current one) that a GC run is
 * starting. Threads that are blocked have their work stolen rather than being
 * counted into the run. Returns the count of threads (0 or 1) that should be
 * added to the finished countdown. */
static MVMuint32 signal_one_thread(MVMThreadContext *tc, MVMThreadContext *to_signal) {
    /* Loop here since we may not succeed first time (e.g. the status of the
     * thread may change between the two ways we try to twiddle it). */
    while (1) {
        switch (MVM_load(&to_signal->gc_status)) {
            case MVMGCStatus_NONE:
                /* Try to set it from running to interrupted - the common case.
                 * The thread will enlist itself at its next safe point, so it
                 * counts toward the finish vote (return 1). */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_NONE,
                        MVMGCStatus_INTERRUPT) == MVMGCStatus_NONE) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Signalled thread %d to interrupt\n", to_signal->thread_id);
                    return 1;
                }
                break;
            case MVMGCStatus_INTERRUPT:
                /* Already signalled (by someone else); nothing to add. */
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already interrupted\n", to_signal->thread_id);
                return 0;
            case MVMGCStatus_UNABLE:
                /* Otherwise, it's blocked; try to set it to work Stolen. We
                 * take on its GC work ourselves, so it is not counted. */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_UNABLE,
                        MVMGCStatus_STOLEN) == MVMGCStatus_UNABLE) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : A blocked thread %d spotted; work stolen\n", to_signal->thread_id);
                    add_work(tc, to_signal);
                    return 0;
                }
                break;
            /* this case occurs if a child thread is Stolen by its parent
             * before we get to it in the chain. */
            case MVMGCStatus_STOLEN:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already stolen (it was a spawning child)\n", to_signal->thread_id);
                return 0;
            default:
                MVM_panic(MVM_exitcode_gcorch,
                    "invalid status %"MVM_PRSz" in GC orchestrate\n",
                    MVM_load(&to_signal->gc_status));
                return 0;
        }
    }
}
/* Called by a thread to indicate it is about to enter a blocking operation.
 * This tells any thread that is coordinating a GC run that this thread will
 * be unable to participate. */
void MVM_gc_mark_thread_blocked(MVMThreadContext *tc) {
    /* This may need more than one attempt. */
    while (1) {
        /* Try to set it from running to unable - the common case. */
        if (MVM_cas(&tc->gc_status, MVMGCStatus_NONE,
                MVMGCStatus_UNABLE) == MVMGCStatus_NONE)
            return;

        /* The only way this can fail is if another thread just decided we're to
         * participate in a GC run: take part in it before blocking, then retry
         * the transition to UNABLE. */
        if (MVM_load(&tc->gc_status) == MVMGCStatus_INTERRUPT)
            MVM_gc_enter_from_interrupt(tc);
        else
            MVM_panic(MVM_exitcode_gcorch,
                "Invalid GC status observed; aborting");
    }
}
/* Runs the end-of-GC protocol for this thread: drains in-trays, votes to
 * finish, lets the coordinator settle cross-thread work and finalizers,
 * resets per-thread GC status, and destroys exited threads.
 * gen selects nursery-only or both-generation collection; is_coordinator is
 * true only for the thread that won the GC-start race. */
static void finish_gc(MVMThreadContext *tc, MVMuint8 gen, MVMuint8 is_coordinator) {
    MVMuint32 i, did_work;

    /* Do any extra work that we have been passed: keep processing the
     * in-trays of every thread we're responsible for until a full pass
     * finds nothing to do. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : doing any work in thread in-trays\n");
    did_work = 1;
    while (did_work) {
        did_work = 0;
        for (i = 0; i < tc->gc_work_count; i++)
            did_work += process_in_tray(tc->gc_work[i].tc, gen);
    }

    /* Decrement gc_finish to say we're done, and wait for termination. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Voting to finish\n");
    MVM_decr(&tc->instance->gc_finish);
    while (MVM_load(&tc->instance->gc_finish)) {
        for (i = 0; i < 1000; i++)
            ; /* XXX Something HT-efficienter. */
        /* XXX Here we can look to see if we got passed any work, and if so
         * try to un-vote. */
    }
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Termination agreed\n");

    /* Co-ordinator should do final check over all the in-trays, and trigger
     * collection until all is settled. Rest should wait. Additionally, after
     * in-trays are settled, coordinator walks threads looking for anything
     * that needs adding to the finalize queue. It then will make another
     * iteration over in-trays to handle cross-thread references to objects
     * needing finalization. For full collections, collected objects are then
     * cleaned from all inter-generational sets, and finally any objects to
     * be freed at the fixed size allocator's next safepoint are freed. */
    if (is_coordinator) {
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Co-ordinator handling in-tray clearing completion\n");
        clear_intrays(tc, gen);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Co-ordinator handling finalizers\n");
        MVM_finalize_walk_queues(tc, gen);
        /* Finalization may have produced fresh cross-thread references;
         * settle the in-trays again. */
        clear_intrays(tc, gen);

        if (gen == MVMGCGenerations_Both) {
            /* Full collection: strip collected objects out of every thread's
             * inter-generational root set. */
            MVMThread *cur_thread = (MVMThread *)MVM_load(&tc->instance->threads);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Co-ordinator handling inter-gen root cleanup\n");
            while (cur_thread) {
                if (cur_thread->body.tc)
                    MVM_gc_root_gen2_cleanup(cur_thread->body.tc);
                cur_thread = cur_thread->body.next;
            }
        }

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Co-ordinator handling fixed-size allocator safepoint frees\n");
        MVM_fixed_size_safepoint(tc, tc->instance->fsa);

        /* Release the non-coordinator threads spinning below. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Co-ordinator signalling in-trays clear\n");
        MVM_store(&tc->instance->gc_intrays_clearing, 0);
    }
    else {
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for in-tray clearing completion\n");
        while (MVM_load(&tc->instance->gc_intrays_clearing))
            for (i = 0; i < 1000; i++)
                ; /* XXX Something HT-efficienter. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Got in-tray clearing complete notice\n");
    }

    /* Reset GC status flags. This is also where thread destruction happens,
     * and it needs to happen before we acknowledge this GC run is finished. */
    for (i = 0; i < tc->gc_work_count; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        MVMThread *thread_obj = other->thread_obj;

        if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_clearing_nursery) {
            /* Thread exited last run and its nursery is now clear: move its
             * surviving gen2 objects to us, then tear its context down. */
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : transferring gen2 of thread %d\n", other->thread_id);
            MVM_gc_gen2_transfer(other, tc);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : destroying thread %d\n", other->thread_id);
            MVM_tc_destroy(other);
            tc->gc_work[i].tc = thread_obj->body.tc = NULL;
            MVM_store(&thread_obj->body.stage, MVM_thread_stage_destroyed);
        }
        else {
            if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_exited) {
                /* Don't bother freeing gen2; we'll do it next time */
                MVM_store(&thread_obj->body.stage, MVM_thread_stage_clearing_nursery);
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : set thread %d clearing nursery stage to %d\n", other->thread_id, (int)MVM_load(&thread_obj->body.stage));
            }
            /* Undo the GC-run status: stolen threads go back to blocked,
             * interrupted threads back to running. Exactly one of these CAS
             * calls matches; the other is a harmless no-op. */
            MVM_cas(&other->gc_status, MVMGCStatus_STOLEN, MVMGCStatus_UNABLE);
            MVM_cas(&other->gc_status, MVMGCStatus_INTERRUPT, MVMGCStatus_NONE);
        }
    }

    /* Signal acknowledgement of completing the cleanup,
     * except for STables, and if we're the final to do
     * so, free the STables, which have been linked.
     * NOTE(review): this compares the value returned by MVM_decr against 2 —
     * presumably MVM_decr returns the pre-decrement value, making us the last
     * acknowledger when it reads 2; confirm against MVM_decr's definition. */
    if (MVM_decr(&tc->instance->gc_ack) == 2) {
        /* Set it to zero (we're guaranteed the only ones trying to write to
         * it here). Actual STable free in MVM_gc_enter_from_allocator. */
        MVM_store(&tc->instance->gc_ack, 0);
    }
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC.
 *
 * Fix over previous revision: the wait on gc_ack used the legacy
 * apr_thread_yield(); replaced with MVM_platform_thread_yield(), which is
 * what the rest of this file uses (see MVM_gc_mark_thread_unblocked). */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCORCH_LOG(tc, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run; winning this CAS makes us the coordinator. */
    if (MVM_cas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32  num_threads  = 0;

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        tc->instance->gc_seq_number++;
        GCORCH_LOG(tc, "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n", tc->instance->gc_seq_number);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* need to wait for other threads to reset their gc_status. */
        while (tc->instance->gc_ack)
            MVM_platform_thread_yield();

        add_work(tc, tc);

        /* grab our child */
        signal_child(tc);

        /* Signal all other threads; newly started threads may enlist while we
         * do so, so repeat until the start count settles. We atomically swap
         * the threads list out (to NULL) so no new entries race with our
         * traversal, then put it back afterwards. */
        do {
            if (tc->instance->threads && tc->instance->threads != last_starter) {
                MVMThread *head;
                MVMuint32 add;
                while (!MVM_cas(&tc->instance->threads, (head = tc->instance->threads), NULL));
                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCORCH_LOG(tc, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_atomic_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }
        } while (tc->instance->gc_start > 1);

        /* Restore the threads list; it must still be NULL or we raced. */
        if (!MVM_cas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");

        if (tc->instance->gc_finish != 0)
            MVM_panic(MVM_exitcode_gcorch, "finish votes was %d\n", tc->instance->gc_finish);

        /* Everyone votes to finish and to acknowledge: them plus us. */
        tc->instance->gc_ack = tc->instance->gc_finish = num_threads + 1;
        GCORCH_LOG(tc, "Thread %d run %d : finish votes is %d\n", (int)tc->instance->gc_finish);

        /* signal to the rest to start.
         * NOTE(review): on failure this prints gc_finish in a message about
         * start votes — presumably it should report the decremented start
         * count; confirm intent before changing the diagnostic. */
        if (MVM_atomic_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "start votes was %d\n", tc->instance->gc_finish);

        run_gc(tc, MVMGCWhatToDo_All);
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCORCH_LOG(tc, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}