MVMObject * references_str(MVMThreadContext *tc, MVMHeapSnapshot *s) {
    /* Produces ; separated sequences of:
     *   kind,idx,to
     * All of which are integers. */
    MVMObject *result;
    size_t buffer_size = 10 * s->num_references;
    size_t buffer_pos  = 0;
    char *buffer       = MVM_malloc(buffer_size);
    MVMuint64 i;
    for (i = 0; i < s->num_references; i++) {
        char tmp[128];
        int item_chars = snprintf(tmp, 128,
            "%"PRIu64",%"PRIu64",%"PRIu64";",
            s->references[i].description & ((1 << MVM_SNAPSHOT_REF_KIND_BITS) - 1),
            s->references[i].description >> MVM_SNAPSHOT_REF_KIND_BITS,
            s->references[i].collectable_index);
        if (item_chars < 0)
            MVM_panic(1, "Failed to save reference in heap snapshot");
        if (buffer_pos + item_chars >= buffer_size) {
            buffer_size += 4096;
            buffer = MVM_realloc(buffer, buffer_size);
        }
        memcpy(buffer + buffer_pos, tmp, item_chars);
        buffer_pos += item_chars;
    }
    if (buffer_pos > 1)
        buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */
    buffer[buffer_pos] = 0;
    result = box_s(tc, vmstr(tc, buffer));
    MVM_free(buffer);
    return result;
}
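/* Illustrative sketch (not MoarVM source): how a reference "description"
 * word as consumed above could be packed and unpacked, assuming the kind
 * lives in the low MVM_SNAPSHOT_REF_KIND_BITS bits and the index part in
 * the remaining high bits. The bit count and names here are hypothetical
 * stand-ins, shown only to make the masking/shifting concrete. */
#include <stdint.h>
#include <stdio.h>

#define REF_KIND_BITS 2 /* assumed value of MVM_SNAPSHOT_REF_KIND_BITS */

static uint64_t pack_ref_description(uint64_t kind, uint64_t index) {
    /* Kind must fit in the low bits; the index is shifted above them. */
    return (index << REF_KIND_BITS) | (kind & ((1 << REF_KIND_BITS) - 1));
}

int main(void) {
    uint64_t desc = pack_ref_description(1, 42);
    printf("kind=%llu idx=%llu\n",
        (unsigned long long)(desc & ((1 << REF_KIND_BITS) - 1)),
        (unsigned long long)(desc >> REF_KIND_BITS));
    return 0;
}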
MVMObject * types_str(MVMThreadContext *tc, MVMHeapSnapshotCollection *col) {
    /* Produces ; separated sequences of:
     *   repr_string_index,type_name_string_index
     * Both of which are integers. */
    MVMObject *result;
    size_t buffer_size = 10 * col->num_types;
    size_t buffer_pos  = 0;
    char *buffer       = MVM_malloc(buffer_size);
    MVMuint64 i;
    for (i = 0; i < col->num_types; i++) {
        char tmp[256];
        int item_chars = snprintf(tmp, 256,
            "%"PRIu64",%"PRIu64";",
            col->types[i].repr_name,
            col->types[i].type_name);
        if (item_chars < 0)
            MVM_panic(1, "Failed to save type in heap snapshot");
        if (buffer_pos + item_chars >= buffer_size) {
            buffer_size += 4096;
            buffer = MVM_realloc(buffer, buffer_size);
        }
        memcpy(buffer + buffer_pos, tmp, item_chars);
        buffer_pos += item_chars;
    }
    if (buffer_pos > 1)
        buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */
    buffer[buffer_pos] = 0;
    result = box_s(tc, vmstr(tc, buffer));
    MVM_free(buffer);
    return result;
}
/* Adds a chunk of work to another thread's in-tray. */
static void push_work_to_thread_in_tray(MVMThreadContext *tc, MVMuint32 target,
                                        MVMGCPassedWork *work) {
    MVMGCPassedWork * volatile *target_tray;

    /* Locate the thread to pass the work to. */
    MVMThreadContext *target_tc = NULL;
    if (target == 1) {
        /* It's going to the main thread. */
        target_tc = tc->instance->main_thread;
    }
    else {
        MVMThread *t = (MVMThread *)MVM_load(&tc->instance->threads);
        do {
            if (t->body.tc && t->body.tc->thread_id == target) {
                target_tc = t->body.tc;
                break;
            }
        } while ((t = t->body.next));
        if (!target_tc)
            MVM_panic(MVM_exitcode_gcnursery,
                "Internal error: invalid thread ID in GC work pass");
    }

    /* Pass the work, chaining any other in-tray entries for the thread
     * after us. */
    target_tray = &target_tc->gc_in_tray;
    while (1) {
        MVMGCPassedWork *orig = *target_tray;
        work->next = orig;
        if (MVM_casptr(target_tray, orig, work) == orig)
            return;
    }
}
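/* Illustrative sketch (not MoarVM source): the lock-free prepend used by
 * push_work_to_thread_in_tray above, expressed with C11 atomics. On a CAS
 * failure the observed head is refreshed and the push retried, so the new
 * node always chains in front of whatever is currently in the tray. The
 * node type and names are hypothetical stand-ins. */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node *next; } Node;

static void lockfree_push(_Atomic(Node *) *head, Node *n) {
    Node *orig = atomic_load(head);
    do {
        n->next = orig; /* chain current head behind us */
        /* On failure, orig is refreshed with the current head; retry. */
    } while (!atomic_compare_exchange_weak(head, &orig, n));
}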
void *MVM_platform_alloc_pages(size_t size, int page_mode) {
    int prot_mode = page_mode_to_prot_mode(page_mode);
    void *allocd  = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, prot_mode);
    if (!allocd)
        MVM_panic(1, "MVM_platform_alloc_pages failed: %d", GetLastError());
    return allocd;
}
/* Sets the current reference "from" collectable. */
static void set_ref_from(MVMThreadContext *tc, MVMHeapSnapshotState *ss, MVMuint64 col_idx) {
    /* The references should be contiguous, so if this collectable already
     * has any, something's wrong. */
    if (ss->hs->collectables[col_idx].num_refs)
        MVM_panic(1, "Heap snapshot corruption: can not add non-contiguous refs");
    ss->ref_from = col_idx;
    ss->hs->collectables[col_idx].refs_start = ss->hs->num_references;
}
static void run_handler(MVMThreadContext *tc, LocatedHandler lh, MVMObject *ex_obj) {
    switch (lh.handler->action) {
    case MVM_EX_ACTION_GOTO:
        unwind_to_frame(tc, lh.frame);
        *tc->interp_cur_op = *tc->interp_bytecode_start + lh.handler->goto_offset;
        break;
    case MVM_EX_ACTION_INVOKE: {
        /* Create active handler record. */
        MVMActiveHandler *ah = malloc(sizeof(MVMActiveHandler));

        /* Find frame to invoke. */
        MVMObject *handler_code = MVM_frame_find_invokee(tc,
            lh.frame->work[lh.handler->block_reg].o);

        /* Ensure we have an exception object. */
        /* TODO: Can make one up. */
        if (ex_obj == NULL)
            MVM_panic(1, "Exception object creation NYI");

        /* Install active handler record. */
        ah->frame           = lh.frame;
        ah->handler         = lh.handler;
        ah->ex_obj          = ex_obj;
        ah->next_handler    = tc->active_handlers;
        tc->active_handlers = ah;

        /* Set up special return to unwinding after running the
         * handler. */
        tc->cur_frame->return_value        = NULL;
        tc->cur_frame->return_type         = MVM_RETURN_VOID;
        tc->cur_frame->special_return      = unwind_after_handler;
        tc->cur_frame->special_return_data = ah;

        /* Invoke the handler frame and return to runloop. */
        STABLE(handler_code)->invoke(tc, handler_code, &no_arg_callsite,
            tc->cur_frame->args);
        break;
    }
    default:
        MVM_panic(1, "Unimplemented handler action");
    }
}
/* Cleans up an active handler record if we unwind over it. */
static void cleanup_active_handler(MVMThreadContext *tc, void *sr_data) {
    /* Get active handler; sanity check (though it's possible other cases
     * should be supported). */
    MVMActiveHandler *ah = (MVMActiveHandler *)sr_data;
    if (tc->active_handlers != ah)
        MVM_panic(1, "Trying to unwind over wrong handler");

    /* Clean up. */
    tc->active_handlers = ah->next_handler;
    MVM_frame_dec_ref(tc, ah->frame);
    free(ah);
}
static void run_handler(MVMThreadContext *tc, LocatedHandler lh, MVMObject *ex_obj,
                        MVMuint32 category) {
    switch (lh.handler->action) {
    case MVM_EX_ACTION_GOTO:
        if (lh.jit_handler) {
            void **labels = lh.frame->spesh_cand->jitcode->labels;
            MVMuint8 *pc  = lh.frame->spesh_cand->jitcode->bytecode;
            lh.frame->jit_entry_label = labels[lh.jit_handler->goto_label];
            MVM_frame_unwind_to(tc, lh.frame, pc, 0, NULL);
        }
        else {
            MVM_frame_unwind_to(tc, lh.frame, NULL, lh.handler->goto_offset, NULL);
        }
        break;
    case MVM_EX_ACTION_INVOKE: {
        /* Create active handler record. */
        MVMActiveHandler *ah = MVM_malloc(sizeof(MVMActiveHandler));
        MVMFrame *cur_frame  = tc->cur_frame;
        MVMObject *handler_code;

        /* Ensure we have an exception object. */
        if (ex_obj == NULL) {
            ex_obj = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTException);
            ((MVMException *)ex_obj)->body.category = category;
        }

        /* Find frame to invoke. */
        handler_code = MVM_frame_find_invokee(tc,
            lh.frame->work[lh.handler->block_reg].o, NULL);

        /* Install active handler record. */
        ah->frame           = MVM_frame_inc_ref(tc, lh.frame);
        ah->handler         = lh.handler;
        ah->jit_handler     = lh.jit_handler;
        ah->ex_obj          = ex_obj;
        ah->next_handler    = tc->active_handlers;
        tc->active_handlers = ah;

        /* Set up special return to unwinding after running the
         * handler. */
        cur_frame->return_value        = (MVMRegister *)&tc->last_handler_result;
        cur_frame->return_type         = MVM_RETURN_OBJ;
        cur_frame->special_return      = unwind_after_handler;
        cur_frame->special_unwind      = cleanup_active_handler;
        cur_frame->special_return_data = ah;

        /* Invoke the handler frame and return to runloop. */
        STABLE(handler_code)->invoke(tc, handler_code,
            MVM_callsite_get_common(tc, MVM_CALLSITE_ID_NULL_ARGS),
            cur_frame->args);
        break;
    }
    default:
        MVM_panic(1, "Unimplemented handler action");
    }
}
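/* Illustrative sketch (not MoarVM source): the "special return" pattern the
 * INVOKE case above relies on - park a one-shot callback plus data on the
 * current frame, and have the runloop fire it once the invoked handler
 * frame returns. All names below are hypothetical stand-ins for the real
 * frame machinery. */
#include <stdio.h>

typedef void (*SpecialReturn)(void *data);

typedef struct {
    SpecialReturn special_return;
    void         *special_return_data;
} Frame;

/* Runloop side: when the invoked frame finishes, fire the parked callback. */
static void frame_returned(Frame *f) {
    if (f->special_return) {
        SpecialReturn sr = f->special_return;
        f->special_return = NULL; /* one-shot: clear before firing */
        sr(f->special_return_data);
    }
}

static void after_handler(void *data) {
    printf("handler done; resuming unwind toward %s\n", (const char *)data);
}

int main(void) {
    Frame f = { after_handler, (void *)"target_frame" };
    frame_returned(&f); /* what the runloop does when the callee returns */
    return 0;
}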
/* Takes a pointer to a guard set. Replaces it with a guard set that also
 * includes a guard for the specified type tuple (passed with callsite to
 * know how many types are involved), resolving to the specified spesh
 * candidate index. Any previous guard set will be scheduled for freeing at
 * the next safepoint. */
void MVM_spesh_arg_guard_add(MVMThreadContext *tc, MVMSpeshArgGuard **orig,
                             MVMCallsite *cs, MVMSpeshStatsType *types,
                             MVMuint32 candidate) {
    MVMSpeshArgGuard *new_guard = copy_and_extend(tc, *orig, max_new_nodes(cs, types));
    if (!try_add_guard(tc, new_guard, cs, types, candidate))
        MVM_panic(1, "Spesh arg guard: trying to add duplicate result for same guard");
    if (*orig) {
        MVMSpeshArgGuard *prev = *orig;
        *orig = new_guard;
        MVM_spesh_arg_guard_destroy(tc, prev, 1);
    }
    else {
        *orig = new_guard;
    }
}
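/* Illustrative sketch (not MoarVM source): the copy, publish, defer-free
 * shape of MVM_spesh_arg_guard_add above, using C11 atomics. Readers that
 * loaded the old tree keep using it safely until the next safepoint; only
 * then is it freed. The Tree type and both helpers are assumptions, shown
 * only to make the update ordering concrete. */
#include <stdatomic.h>

typedef struct Tree Tree;
extern Tree *copy_and_extend_tree(Tree *old);   /* assumed helper */
extern void  free_at_next_safepoint(Tree *old); /* assumed helper */

static void publish_updated_tree(_Atomic(Tree *) *slot) {
    Tree *old      = atomic_load(slot);
    Tree *replaced = copy_and_extend_tree(old); /* build off to the side */
    atomic_store(slot, replaced);               /* single atomic publish */
    if (old)
        free_at_next_safepoint(old);            /* defer; readers may hold it */
}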
/* Unwinds after a handler. */
void unwind_after_handler(MVMThreadContext *tc, void *sr_data) {
    /* Get active handler; sanity check (though it's possible other cases
     * should be supported). */
    MVMActiveHandler *ah = (MVMActiveHandler *)sr_data;
    if (tc->active_handlers != ah)
        MVM_panic(1, "Trying to unwind from wrong handler");
    tc->active_handlers = ah->next_handler;

    /* Do the unwinding as needed. */
    unwind_to_frame(tc, ah->frame);
    *tc->interp_cur_op = *tc->interp_bytecode_start + ah->handler->goto_offset;

    /* Clean up. */
    free(ah);
}
void MVM_frame_unwind_to(MVMThreadContext *tc, MVMFrame *frame, MVMuint8 *abs_addr,
                         MVMuint32 rel_addr, MVMObject *return_value) {
    while (tc->cur_frame != frame) {
        if (tc->cur_frame->static_info->body.has_exit_handler &&
                !(tc->cur_frame->flags & MVM_FRAME_FLAG_EXIT_HAND_RUN)) {
            /* We're unwinding a frame with an exit handler. Thus we need to
             * pause the unwind, run the exit handler, and keep enough info
             * around in order to finish up the unwind afterwards. */
            MVMFrame     *caller = tc->cur_frame->caller;
            MVMHLLConfig *hll    = MVM_hll_current(tc);
            MVMObject    *handler;
            if (!caller)
                MVM_exception_throw_adhoc(tc, "Entry point frame cannot have an exit handler");
            if (tc->cur_frame == tc->thread_entry_frame)
                MVM_exception_throw_adhoc(tc, "Thread entry point frame cannot have an exit handler");
            MVM_args_setup_thunk(tc, NULL, MVM_RETURN_VOID, &exit_arg_callsite);
            tc->cur_frame->args[0].o = tc->cur_frame->code_ref;
            tc->cur_frame->args[1].o = NULL;
            tc->cur_frame->special_return = continue_unwind;
            {
                MVMUnwindData *ud = malloc(sizeof(MVMUnwindData));
                ud->frame    = frame;
                ud->abs_addr = abs_addr;
                ud->rel_addr = rel_addr;
                if (return_value)
                    MVM_exception_throw_adhoc(tc, "return_value + exit_handler case NYI");
                tc->cur_frame->special_return_data = ud;
            }
            tc->cur_frame->flags |= MVM_FRAME_FLAG_EXIT_HAND_RUN;
            handler = MVM_frame_find_invokee(tc, hll->exit_handler, NULL);
            STABLE(handler)->invoke(tc, handler, &exit_arg_callsite, tc->cur_frame->args);
            return;
        }
        else {
            /* No exit handler, so just remove the frame. */
            if (!remove_one_frame(tc, 1))
                MVM_panic(1, "Internal error: Unwound entire stack and missed handler");
        }
    }
    if (abs_addr)
        *tc->interp_cur_op = abs_addr;
    else if (rel_addr)
        *tc->interp_cur_op = *tc->interp_bytecode_start + rel_addr;
    if (return_value)
        MVM_args_set_result_obj(tc, return_value, 1);
}
/* Resolves or inserts a node for testing the current type loaded into the
 * test buffer. If it needs to insert a new node, it chains it on to the
 * end of the existing set of type tests. */
static MVMuint32 get_type_check_node(MVMThreadContext *tc, MVMSpeshArgGuard *ag,
                                     MVMuint32 base_node, MVMObject *type,
                                     MVMuint8 concrete) {
    MVMuint32 current_node    = ag->nodes[base_node].yes;
    MVMuint32 have_fixup_node = 0;
    MVMuint32 fixup_node;
    while (current_node != 0) {
        MVMSpeshArgGuardNode *agn = &(ag->nodes[current_node]);
        if (agn->op == MVM_SPESH_GUARD_OP_STABLE_CONC) {
            /* If it matches, we've found it. */
            if (concrete && agn->st == type->st)
                return current_node;

            /* Otherwise, treat this as the working fixup node, and take
             * the no branch. */
            fixup_node      = current_node;
            have_fixup_node = 1;
            current_node    = agn->no;
        }
        else if (agn->op == MVM_SPESH_GUARD_OP_STABLE_TYPE) {
            /* If it matches, we've found it. */
            if (!concrete && agn->st == type->st)
                return current_node;

            /* Otherwise, treat this as the working fixup node, and take
             * the no branch. */
            fixup_node      = current_node;
            have_fixup_node = 1;
            current_node    = agn->no;
        }
        else {
            /* We only expect type matching nodes at the top level. */
            MVM_panic(1, "Spesh arg guard: unexpected type structure in tree");
        }
    }

    /* If we get here, we need to add a node for this callsite. */
    ag->nodes[ag->used_nodes].op  = concrete
        ? MVM_SPESH_GUARD_OP_STABLE_CONC
        : MVM_SPESH_GUARD_OP_STABLE_TYPE;
    ag->nodes[ag->used_nodes].st  = type->st;
    ag->nodes[ag->used_nodes].yes = 0;
    ag->nodes[ag->used_nodes].no  = 0;
    if (have_fixup_node)
        ag->nodes[fixup_node].no = ag->used_nodes;
    else
        ag->nodes[base_node].yes = ag->used_nodes;
    return ag->used_nodes++;
}
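/* Illustrative sketch (not MoarVM source): walking a small yes/no decision
 * tree stored as an array of nodes, the way get_type_check_node does above.
 * Node 0 is a root whose 'yes' points at the first type test; mismatches
 * follow the 'no' chain of alternatives. Types and names are hypothetical. */
#include <stdio.h>

typedef struct {
    int      tag; /* stands in for the STable pointer compared above */
    unsigned yes; /* node to take on match (0 = none) */
    unsigned no;  /* next alternative test to try on mismatch (0 = none) */
} Node;

static int find_test_node(const Node *nodes, unsigned root, int tag) {
    unsigned cur = nodes[root].yes;
    while (cur != 0) {
        if (nodes[cur].tag == tag)
            return (int)cur; /* an existing test for this tag */
        cur = nodes[cur].no; /* walk the chain of alternatives */
    }
    return -1;               /* caller would append a new node here */
}

int main(void) {
    Node nodes[4] = { { 0, 1, 0 }, { 7, 0, 2 }, { 9, 0, 0 } };
    printf("%d %d\n", find_test_node(nodes, 0, 9), find_test_node(nodes, 0, 5));
    return 0;
}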
/* Called by a thread to indicate it is about to enter a blocking operation.
 * This tells any thread that is coordinating a GC run that this thread will
 * be unable to participate. */
void MVM_gc_mark_thread_blocked(MVMThreadContext *tc) {
    /* This may need more than one attempt. */
    while (1) {
        /* Try to set it from running to unable - the common case. */
        if (MVM_cas(&tc->gc_status, MVMGCStatus_NONE,
                MVMGCStatus_UNABLE) == MVMGCStatus_NONE)
            return;

        /* The only way this can fail is if another thread just decided we're
         * to participate in a GC run. */
        if (MVM_load(&tc->gc_status) == MVMGCStatus_INTERRUPT)
            MVM_gc_enter_from_interrupt(tc);
        else
            MVM_panic(MVM_exitcode_gcorch, "Invalid GC status observed; aborting");
    }
}
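/* Illustrative sketch (not MoarVM source): the usage bracket these markers
 * exist for. Any operation that can block indefinitely gets wrapped so a GC
 * run can proceed (and steal this thread's work) in the meantime; the
 * blocking call itself is elided here, and this helper is hypothetical. */
static void wait_without_stalling_gc(MVMThreadContext *tc) {
    MVM_gc_mark_thread_blocked(tc);   /* from here, GC may run without us */
    /* ... blocking syscall, lock acquisition, or condvar wait ... */
    MVM_gc_mark_thread_unblocked(tc); /* may rendezvous with an in-flight GC */
}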
/* Searches for a handler of the specified category, relative to the given
 * starting frame, searching according to the chosen mode. */
static LocatedHandler search_for_handler_from(MVMThreadContext *tc, MVMFrame *f,
                                              MVMuint8 mode, MVMuint32 cat,
                                              MVMObject *payload) {
    LocatedHandler lh;
    lh.frame       = NULL;
    lh.handler     = NULL;
    lh.jit_handler = NULL;
    lh.handler_out_of_dynamic_scope = 0;
    switch (mode) {
    case MVM_EX_THROW_LEX_CALLER:
        f = f->caller;
        while (f && f->static_info->body.is_thunk)
            f = f->caller;
        /* And now we've gone down a caller, it's just lexical... */
        /* Fall through. */
    case MVM_EX_THROW_LEX:
        while (f != NULL) {
            if (search_frame_handlers(tc, f, MVM_EX_THROW_LEX, cat, payload, &lh)) {
                if (in_caller_chain(tc, f))
                    lh.frame = f;
                else
                    lh.handler_out_of_dynamic_scope = 1;
                return lh;
            }
            f = f->outer;
        }
        return lh;
    case MVM_EX_THROW_DYN:
        while (f != NULL) {
            if (search_frame_handlers(tc, f, mode, cat, payload, &lh)) {
                lh.frame = f;
                return lh;
            }
            f = f->caller;
        }
        return lh;
    case MVM_EX_THROW_LEXOTIC:
        while (f != NULL) {
            lh = search_for_handler_from(tc, f, MVM_EX_THROW_LEX, cat, payload);
            if (lh.frame != NULL)
                return lh;
            f = f->caller;
        }
        return lh;
    default:
        MVM_panic(1, "Unhandled exception throw mode %d", (int)mode);
    }
}
static void run_gc(MVMThreadContext *tc, MVMuint8 what_to_do) {
    MVMuint8  gen;
    MVMuint32 i, n;

#if MVM_GC_DEBUG
    if (tc->in_spesh)
        MVM_panic(1, "Must not GC when in the specializer/JIT\n");
#endif

    /* Decide nursery or full collection. */
    gen = tc->instance->gc_full_collect
        ? MVMGCGenerations_Both
        : MVMGCGenerations_Nursery;

    /* Do GC work for ourselves and any work threads. */
    for (i = 0, n = tc->gc_work_count; i < n; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        tc->gc_work[i].limit = other->nursery_alloc;
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : starting collection for thread %d\n",
            other->thread_id);
        other->gc_promoted_bytes = 0;
        MVM_gc_collect(other, (other == tc ? what_to_do : MVMGCWhatToDo_NoInstance), gen);
    }

    /* Wait for everybody to agree we're done. */
    finish_gc(tc, gen, what_to_do == MVMGCWhatToDo_All);

    /* Now we're all done, it's safe to finalize any objects that need it. */
    /* XXX TODO explore the feasibility of doing this in a background
     * finalizer/destructor thread and letting the main thread(s) continue
     * on their merry way(s). */
    for (i = 0, n = tc->gc_work_count; i < n; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;

        /* The thread might've been destroyed. */
        if (!other)
            continue;

        /* Contribute this thread's promoted bytes. */
        MVM_add(&tc->instance->gc_promoted_bytes_since_last_full, other->gc_promoted_bytes);

        /* Collect nursery. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : collecting nursery uncopied of thread %d\n",
            other->thread_id);
        MVM_gc_collect_free_nursery_uncopied(other, tc->gc_work[i].limit);
    }
}
/* Frame exit handler, used for unwind and normal exit. */
static void log_exit(MVMThreadContext *tc, MVMuint32 unwind) {
    MVMProfileThreadData *ptd = get_thread_data(tc);

    /* Ensure we've a current frame; panic if not. */
    /* XXX in future, don't panic, try to cope. This is for debugging
     * profiler issues. */
    MVMProfileCallNode *pcn = ptd->current_call;
    if (!pcn /*|| !unwind && pcn->sf != tc->cur_frame->static_info*/) {
        MVM_dump_backtrace(tc);
        MVM_panic(1, "Profiler lost sequence");
    }

    /* Add to total time. */
    pcn->total_time += (uv_hrtime() - pcn->cur_entry_time) - pcn->cur_skip_time;

    /* Move back to predecessor in call graph. */
    ptd->current_call = pcn->pred;
}
/* Adds a chunk of work to another thread's in-tray. */
static void push_work_to_thread_in_tray(MVMThreadContext *tc, MVMuint32 target,
                                        MVMGCPassedWork *work) {
    MVMGCPassedWork * volatile *target_tray;

    /* Locate the thread to pass the work to. */
    MVMThreadContext *target_tc = NULL;
    if (target == 0) {
        /* It's going to the main thread. */
        target_tc = tc->instance->main_thread;
    }
    else {
        MVMThread *t = (MVMThread *)MVM_load(&tc->instance->threads);
        do {
            if (t->body.tc->thread_id == target) {
                target_tc = t->body.tc;
                break;
            }
        } while ((t = t->body.next));
        if (!target_tc)
            MVM_panic(MVM_exitcode_gcnursery,
                "Internal error: invalid thread ID in GC work pass");
    }

    /* Push to sent_items list. */
    if (tc->gc_sent_items) {
        tc->gc_sent_items->next_by_sender = work;
        work->last_by_sender = tc->gc_sent_items;
    }
    /* Queue it up to check if the check list isn't clear. */
    if (!MVM_load(&tc->gc_next_to_check))
        MVM_store(&tc->gc_next_to_check, work);
    tc->gc_sent_items = work;

    /* Pass the work, chaining any other in-tray entries for the thread
     * after us. */
    target_tray = &target_tc->gc_in_tray;
    while (1) {
        MVMGCPassedWork *orig = *target_tray;
        work->next = orig;
        if (MVM_casptr(target_tray, orig, work) == orig)
            return;
    }
}
/* Adds work to list of items to pass over to another thread, and if we
 * reach the pass threshold then does the passing. */
static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp,
                           MVMCollectable **item_ptr) {
    ThreadWork *target_info = NULL;
    MVMuint32   target      = (*item_ptr)->owner;
    MVMuint32   j;

    /* Find any existing thread work passing list for the target. */
    if (target == 0)
        MVM_panic(MVM_exitcode_gcnursery,
            "Internal error: zeroed target thread ID in work pass");
    for (j = 0; j < wtp->num_target_threads; j++) {
        if (wtp->target_work[j].target == target) {
            target_info = &wtp->target_work[j];
            break;
        }
    }

    /* If there's no entry for this target, create one. */
    if (target_info == NULL) {
        wtp->num_target_threads++;
        wtp->target_work = MVM_realloc(wtp->target_work,
            wtp->num_target_threads * sizeof(ThreadWork));
        target_info = &wtp->target_work[wtp->num_target_threads - 1];
        target_info->target = target;
        target_info->work   = NULL;
    }

    /* See if there's a currently active list; create it if not. */
    if (!target_info->work)
        target_info->work = calloc(1, sizeof(MVMGCPassedWork));

    /* Add this item to the work list. */
    target_info->work->items[target_info->work->num_items] = item_ptr;
    target_info->work->num_items++;

    /* If we've hit the limit, pass this work to the target thread. */
    if (target_info->work->num_items == MVM_GC_PASS_WORK_SIZE) {
        push_work_to_thread_in_tray(tc, target, target_info->work);
        target_info->work = NULL;
    }
}
static MVMuint32 signal_all_but(MVMThreadContext *tc, MVMThread *t, MVMThread *tail) {
    MVMuint32 count = 0;
    MVMThread *next;
    if (!t)
        return 0;
    do {
        next = t->body.next;
        switch (t->body.stage) {
        case MVM_thread_stage_starting:
        case MVM_thread_stage_waiting:
        case MVM_thread_stage_started:
            if (t->body.tc != tc)
                count += signal_one_thread(tc, t->body.tc);
            break;
        case MVM_thread_stage_exited:
            GCORCH_LOG(tc, "Thread %d run %d : queueing to clear nursery of thread %d\n",
                t->body.tc->thread_id);
            add_work(tc, t->body.tc);
            break;
        case MVM_thread_stage_clearing_nursery:
            GCORCH_LOG(tc, "Thread %d run %d : queueing to destroy thread %d\n",
                t->body.tc->thread_id);
            /* Last GC run for this thread. */
            add_work(tc, t->body.tc);
            break;
        case MVM_thread_stage_destroyed:
            GCORCH_LOG(tc, "Thread %d run %d : found a destroyed thread\n");
            /* Will be cleaned up (removed from the lists) shortly. */
            break;
        default:
            MVM_panic(MVM_exitcode_gcorch,
                "Corrupted MVMThread or running threads list: invalid thread stage %d",
                t->body.stage);
        }
    } while (next && (t = next));
    if (tail)
        MVM_WB(tc, t, tail);
    t->body.next = tail;
    return count;
}
/* Called when we take a continuation. Leaves the static frames from the point
 * of view of the profiler, and saves each of them. */
MVMProfileContinuationData * MVM_profile_log_continuation_control(MVMThreadContext *tc,
                                                                  MVMFrame *root_frame) {
    MVMProfileThreadData       *ptd = get_thread_data(tc);
    MVMProfileContinuationData *cd  = MVM_malloc(sizeof(MVMProfileContinuationData));
    MVMStaticFrame **sfs   = NULL;
    MVMuint64       *modes = NULL;
    MVMFrame *cur_frame = tc->cur_frame;
    MVMuint64 alloc_sfs = 0;
    MVMuint64 num_sfs   = 0;
    MVMFrame *last_frame;

    do {
        MVMProfileCallNode *lpcn;
        do {
            MVMProfileCallNode *pcn = ptd->current_call;
            if (!pcn)
                MVM_panic(1, "Profiler lost sequence in continuation control");

            if (num_sfs == alloc_sfs) {
                alloc_sfs += 16;
                sfs   = MVM_realloc(sfs, alloc_sfs * sizeof(MVMStaticFrame *));
                modes = MVM_realloc(modes, alloc_sfs * sizeof(MVMuint64));
            }
            sfs[num_sfs]   = pcn->sf;
            modes[num_sfs] = pcn->entry_mode;
            num_sfs++;

            lpcn = pcn;
            log_exit(tc, 1);
        } while (lpcn->sf != cur_frame->static_info);

        last_frame = cur_frame;
        cur_frame  = cur_frame->caller;
    } while (last_frame != root_frame);

    cd->sfs     = sfs;
    cd->num_sfs = num_sfs;
    cd->modes   = modes;
    return cd;
}
/* Unwinds after a handler. */
static void unwind_after_handler(MVMThreadContext *tc, void *sr_data) {
    MVMFrame     *frame;
    MVMException *exception;
    MVMuint32     goto_offset;
    MVMuint8     *abs_address;

    /* Get active handler; sanity check (though it's possible other cases
     * should be supported). */
    MVMActiveHandler *ah = (MVMActiveHandler *)sr_data;
    if (tc->active_handlers != ah)
        MVM_panic(1, "Trying to unwind from wrong handler");

    /* Grab info we'll need to unwind. */
    frame     = ah->frame;
    exception = (MVMException *)ah->ex_obj;
    if (ah->jit_handler) {
        void **labels = frame->spesh_cand->jitcode->labels;
        frame->jit_entry_label = labels[ah->jit_handler->goto_label];
        abs_address = frame->spesh_cand->jitcode->bytecode;
        goto_offset = 0;
    }
    else {
        goto_offset = ah->handler->goto_offset;
        abs_address = NULL;
    }

    /* Clean up. */
    tc->active_handlers = ah->next_handler;
    MVM_frame_dec_ref(tc, ah->frame);
    MVM_free(ah);

    /* Do the unwinding as needed. */
    if (exception && exception->body.return_after_unwind)
        MVM_frame_unwind_to(tc, frame->caller, NULL, 0, tc->last_handler_result);
    else
        MVM_frame_unwind_to(tc, frame, abs_address, goto_offset, NULL);
}
/* Adds a location holding a collectable object to the permanent list of GC
 * roots, so that it will always be marked and never die. Note that the
 * address of the collectable must be passed, since it will need to be
 * updated. */
void MVM_gc_root_add_permanent_desc(MVMThreadContext *tc, MVMCollectable **obj_ref,
                                    char *description) {
    if (obj_ref == NULL)
        MVM_panic(MVM_exitcode_gcroots,
            "Illegal attempt to add null object address as a permanent root");
    uv_mutex_lock(&tc->instance->mutex_permroots);

    /* Allocate extra permanent root space if needed. */
    if (tc->instance->num_permroots == tc->instance->alloc_permroots) {
        tc->instance->alloc_permroots *= 2;
        tc->instance->permroots = MVM_realloc(tc->instance->permroots,
            sizeof(MVMCollectable **) * tc->instance->alloc_permroots);
        tc->instance->permroot_descriptions = MVM_realloc(
            tc->instance->permroot_descriptions,
            sizeof(char *) * tc->instance->alloc_permroots);
    }

    /* Add this one to the list. */
    tc->instance->permroots[tc->instance->num_permroots] = obj_ref;
    tc->instance->permroot_descriptions[tc->instance->num_permroots] = description;
    tc->instance->num_permroots++;

    uv_mutex_unlock(&tc->instance->mutex_permroots);
}
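/* Illustrative sketch (not MoarVM source): typical use of the permanent
 * root list - register a long-lived, GC-managed global once at setup so
 * the collector always marks it and can update the pointer if the object
 * moves. The global and the setup function here are hypothetical. */
static MVMObject *my_cached_type_registry;

static void setup_my_globals(MVMThreadContext *tc) {
    my_cached_type_registry = MVM_repr_alloc_init(tc,
        tc->instance->boot_types.BOOTHash);
    MVM_gc_root_add_permanent_desc(tc,
        (MVMCollectable **)&my_cached_type_registry,
        "my cached type registry");
}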
static MVMuint32 signal_all_but(MVMThreadContext *tc, MVMThread *t, MVMThread *tail) {
    MVMuint32 count = 0;
    MVMThread *next;
    if (!t)
        return 0;
    do {
        next = t->body.next;
        switch (MVM_load(&t->body.stage)) {
        case MVM_thread_stage_starting:
        case MVM_thread_stage_waiting:
        case MVM_thread_stage_started:
            if (t->body.tc != tc)
                count += signal_one_thread(tc, t->body.tc);
            break;
        case MVM_thread_stage_exited:
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : queueing to clear nursery of thread %d\n",
                t->body.tc->thread_id);
            add_work(tc, t->body.tc);
            break;
        case MVM_thread_stage_clearing_nursery:
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : queueing to destroy thread %d\n",
                t->body.tc->thread_id);
            /* Last GC run for this thread. */
            add_work(tc, t->body.tc);
            break;
        case MVM_thread_stage_destroyed:
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : found a destroyed thread\n");
            /* Will be cleaned up (removed from the lists) shortly. */
            break;
        default:
            MVM_panic(MVM_exitcode_gcorch,
                "Corrupted MVMThread or running threads list: invalid thread stage %"MVM_PRSz"",
                MVM_load(&t->body.stage));
        }
    } while (next && (t = next));
    if (tail)
        MVM_gc_write_barrier(tc, (MVMCollectable *)t, (MVMCollectable *)tail);
    t->body.next = tail;
    return count;
}
/* Some objects, having been copied, need no further attention. Others
 * need to do some additional freeing, however. This goes through the
 * fromspace and does any needed work to free uncopied things (this may
 * run in parallel with the mutator, which will be operating on tospace). */
void MVM_gc_collect_free_nursery_uncopied(MVMThreadContext *tc, void *limit) {
    /* We start scanning the fromspace, and keep going until we hit
     * the end of the area allocated in it. */
    void *scan = tc->nursery_fromspace;
    while (scan < limit) {
        /* The object here is dead if it never got a forwarding pointer
         * written in to it. */
        MVMCollectable *item = (MVMCollectable *)scan;
        MVMuint8 dead = item->forwarder == NULL;

        /* Now go by collectable type. */
        if (!(item->flags & (MVM_CF_TYPE_OBJECT | MVM_CF_STABLE))) {
            /* Object instance. If dead, call gc_free if needed. Scan is
             * incremented by object size. */
            MVMObject *obj = (MVMObject *)item;
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT,
                "Thread %d run %d : collecting an object %p in the nursery with reprid %d\n",
                item, REPR(obj)->ID);
            if (dead && REPR(obj)->gc_free)
                REPR(obj)->gc_free(tc, obj);
        }
        else if (item->flags & MVM_CF_TYPE_OBJECT) {
            /* Type object; doesn't have anything extra that needs freeing. */
        }
        else if (item->flags & MVM_CF_STABLE) {
            MVMSTable *st = (MVMSTable *)item;
            if (dead) {
                /* GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT,
                    "Thread %d run %d : enqueuing an STable %d in the nursery to be freed\n",
                    item); */
                MVM_gc_collect_enqueue_stable_for_deletion(tc, st);
            }
        }
        else {
            printf("item flags: %d\n", item->flags);
            MVM_panic(MVM_exitcode_gcnursery,
                "Internal error: impossible case encountered in GC free");
        }

        /* Go to the next item. */
        scan = (char *)scan + item->size;
    }
}
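/* Illustrative summary (not MoarVM source): liveness here is defined by the
 * copying pass - an item in fromspace is live iff a forwarding pointer was
 * written into its header when it was evacuated to tospace. Dead objects
 * only need work if their REPR owns unmanaged memory (gc_free); dead
 * STables are not freed immediately, since other threads may still be
 * reading them, so they go on a deferred deletion queue instead. */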
MVMObject * collectables_str(MVMThreadContext *tc, MVMHeapSnapshot *s) {
    /* Produces ; separated sequences of:
     *   kind,type_or_frame_index,collectable_size,unmanaged_size,refs_start,num_refs
     * All of which are integers. */
    MVMObject *result;
    size_t buffer_size = 20 * s->num_collectables;
    size_t buffer_pos  = 0;
    char *buffer       = MVM_malloc(buffer_size);
    MVMuint64 i;
    for (i = 0; i < s->num_collectables; i++) {
        char tmp[256];
        int item_chars = snprintf(tmp, 256,
            "%"PRIu16",%"PRId32",%"PRIu16",%"PRIu64",%"PRIu64",%"PRIu32";",
            s->collectables[i].kind,
            s->collectables[i].type_or_frame_index,
            s->collectables[i].collectable_size,
            s->collectables[i].unmanaged_size,
            s->collectables[i].num_refs
                ? s->collectables[i].refs_start
                : (MVMuint64)0,
            s->collectables[i].num_refs);
        if (item_chars < 0)
            MVM_panic(1, "Failed to save collectable in heap snapshot");
        if (buffer_pos + item_chars >= buffer_size) {
            buffer_size += 4096;
            buffer = MVM_realloc(buffer, buffer_size);
        }
        memcpy(buffer + buffer_pos, tmp, item_chars);
        buffer_pos += item_chars;
    }
    if (buffer_pos > 1)
        buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */
    buffer[buffer_pos] = 0;
    result = box_s(tc, vmstr(tc, buffer));
    MVM_free(buffer);
    return result;
}
/* If we have to deopt inside of a frame containing inlines, and we're in
 * an inlined frame at the point we hit deopt, we need to undo the inlining
 * by switching all levels of inlined frame out for a bunch of frames that
 * are running the de-optimized code. We may, of course, be in the original,
 * non-inline, bit of the code - in which case we've nothing to do. */
static void uninline(MVMThreadContext *tc, MVMFrame *f, MVMSpeshCandidate *cand,
                     MVMint32 offset, MVMint32 deopt_offset, MVMFrame *callee) {
    MVMFrame      *last_uninlined = NULL;
    MVMuint16      last_res_reg;
    MVMReturnType  last_res_type;
    MVMuint32      last_return_deopt_idx;
    MVMint32 i;
    for (i = 0; i < cand->num_inlines; i++) {
        if (offset >= cand->inlines[i].start && offset < cand->inlines[i].end) {
            /* Create the frame. */
            MVMCode        *ucode = (MVMCode *)f->work[cand->inlines[i].code_ref_reg].o;
            MVMStaticFrame *usf   = cand->inlines[i].sf;
            MVMFrame       *uf;
            if (REPR(ucode)->ID != MVM_REPR_ID_MVMCode)
                MVM_panic(1, "Deopt: did not find code object when uninlining");
            MVMROOT(tc, f, {
            MVMROOT(tc, callee, {
            MVMROOT(tc, last_uninlined, {
            MVMROOT(tc, usf, {
                uf = MVM_frame_create_for_deopt(tc, usf, ucode);
            });
            });
            });
void MVM_thread_join(MVMThreadContext *tc, MVMObject *thread_obj) {
    if (REPR(thread_obj)->ID == MVM_REPR_ID_MVMThread) {
        MVMThread *thread = (MVMThread *)thread_obj;
        int status;
        MVM_gc_root_temp_push(tc, (MVMCollectable **)&thread);
        MVM_gc_mark_thread_blocked(tc);
        if (((MVMThread *)thread_obj)->body.stage < MVM_thread_stage_exited) {
            status = uv_thread_join(&thread->body.thread);
        }
        else {
            /* The target already ended. */
            /* Used to be APR_SUCCESS, but then we ditched APR. */
            status = 0;
        }
        MVM_gc_mark_thread_unblocked(tc);
        MVM_gc_root_temp_pop(tc);
        if (status < 0)
            MVM_panic(MVM_exitcode_compunit,
                "Could not join thread: errorcode %d", status);
    }
    else {
        MVM_exception_throw_adhoc(tc,
            "Thread handle passed to join must have representation MVMThread");
    }
}
/* Goes through all threads but the current one and notifies them that a
 * GC run is starting. Those that are blocked are considered excluded from
 * the run, and are not counted. Returns the count of threads that should be
 * added to the finished countdown. */
static MVMuint32 signal_one_thread(MVMThreadContext *tc, MVMThreadContext *to_signal) {
    /* Loop here since we may not succeed first time (e.g. the status of the
     * thread may change between the two ways we try to twiddle it). */
    while (1) {
        switch (MVM_load(&to_signal->gc_status)) {
        case MVMGCStatus_NONE:
            /* Try to set it from running to interrupted - the common case. */
            if (MVM_cas(&to_signal->gc_status, MVMGCStatus_NONE,
                    MVMGCStatus_INTERRUPT) == MVMGCStatus_NONE) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : Signalled thread %d to interrupt\n",
                    to_signal->thread_id);
                return 1;
            }
            break;
        case MVMGCStatus_INTERRUPT:
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : thread %d already interrupted\n",
                to_signal->thread_id);
            return 0;
        case MVMGCStatus_UNABLE:
            /* Otherwise, it's blocked; try to set it to work stolen. */
            if (MVM_cas(&to_signal->gc_status, MVMGCStatus_UNABLE,
                    MVMGCStatus_STOLEN) == MVMGCStatus_UNABLE) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : A blocked thread %d spotted; work stolen\n",
                    to_signal->thread_id);
                add_work(tc, to_signal);
                return 0;
            }
            break;
        /* This case occurs if a child thread is stolen by its parent
         * before we get to it in the chain. */
        case MVMGCStatus_STOLEN:
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : thread %d already stolen (it was a spawning child)\n",
                to_signal->thread_id);
            return 0;
        default:
            MVM_panic(MVM_exitcode_gcorch,
                "invalid status %"MVM_PRSz" in GC orchestrate\n",
                MVM_load(&to_signal->gc_status));
            return 0;
        }
    }
}
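/* Illustrative summary (not MoarVM source): the gc_status edges that
 * signal_one_thread and MVM_gc_mark_thread_blocked rely on. Every edge is
 * taken with a CAS, so exactly one side wins each transition:
 *   NONE      -> INTERRUPT  coordinator asks a running thread to join in
 *   NONE      -> UNABLE     a thread marks itself blocked (see above)
 *   UNABLE    -> STOLEN     coordinator takes over a blocked thread's work
 *   INTERRUPT (observed)    nothing to do; the thread is already signed up */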
/* Unwinds execution state to the specified frame. */
static void unwind_to_frame(MVMThreadContext *tc, MVMFrame *target) {
    while (tc->cur_frame != target)
        if (!MVM_frame_try_unwind(tc))
            MVM_panic(1, "Internal error: Unwound entire stack and missed handler");
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;
        MVMuint32 is_full;

        /* Need to wait for other threads to reset their gc_status. */
        while (MVM_load(&tc->instance->gc_ack)) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : waiting for other thread's gc_ack\n");
            MVM_platform_thread_yield();
        }

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        MVM_incr(&tc->instance->gc_seq_number);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            (int)MVM_load(&tc->instance->gc_seq_number));

        /* Decide if it will be a full collection. */
        is_full = is_full_collection(tc);

        /* If profiling, record that GC is starting. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_start(tc, is_full);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* Flag that we didn't agree on this run that all the in-trays are
         * cleared (a responsibility of the coordinator). */
        MVM_store(&tc->instance->gc_intrays_clearing, 1);

        /* We'll take care of our own work. */
        add_work(tc, tc);

        /* Find other threads, and signal or steal. */
        do {
            MVMThread *threads = (MVMThread *)MVM_load(&tc->instance->threads);
            if (threads && threads != last_starter) {
                MVMThread *head = threads;
                MVMuint32 add;
                while ((threads = (MVMThread *)MVM_casptr(&tc->instance->threads,
                        head, NULL)) != head) {
                    head = threads;
                }
                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                        "Thread %d run %d : Found %d other threads\n", add);
                    MVM_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }

            /* If there's an event loop thread, wake it up to participate. */
            if (tc->instance->event_loop_wakeup)
                uv_async_send(tc->instance->event_loop_wakeup);
        } while (MVM_load(&tc->instance->gc_start) > 1);

        /* Sanity checks. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (MVM_load(&tc->instance->gc_finish) != 0)
            MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_finish));

        /* gc_ack gets an extra so the final acknowledger
         * can also free the STables. */
        MVM_store(&tc->instance->gc_finish, num_threads + 1);
        MVM_store(&tc->instance->gc_ack, num_threads + 2);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : finish votes is %d\n",
            (int)MVM_load(&tc->instance->gc_finish));

        /* Now we're ready to start, zero promoted since last full collection
         * counter if this is a full collect. */
        if (is_full)
            MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0);

        /* Signal to the rest to start. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : coordinator signalling start\n");
        if (MVM_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_start));

        /* Start collecting. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : coordinator entering run_gc\n");
        run_gc(tc, MVMGCWhatToDo_All);

        /* Free any STables that have been marked for deletion. It's okay for
         * us to muck around in another thread's fromspace while it's mutating
         * tospace, really. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Freeing STables if needed\n");
        MVM_gc_collect_free_stables(tc);

        /* If profiling, record that GC is over. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_end(tc);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC complete (coordinator)\n");
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
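/* Illustrative summary (not MoarVM source): the coordinator protocol above,
 * in brief.
 *   1. CAS gc_start from 0 to 1; the winner becomes coordinator, and losers
 *      fall into MVM_gc_enter_from_interrupt as ordinary participants.
 *   2. The coordinator bumps gc_seq_number, then signals or steals every
 *      other thread (signal_all_but / signal_one_thread), counting votes.
 *   3. gc_finish and gc_ack are preloaded from those counts so each thread
 *      can check in as it completes its share of the work.
 *   4. Decrementing gc_start releases everyone into run_gc; the coordinator
 *      additionally frees dead STables once the run is complete. */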