/* Creates the per-thread MoarVM data structure (an MVMThreadContext). This
 * does not spawn an OS thread itself; it only builds the state that MoarVM
 * keeps for each thread. Returns the freshly allocated context. */
MVMThreadContext * MVM_tc_create(MVMThreadContext *parent, MVMInstance *instance) {
    MVMThreadContext *tc = MVM_calloc(1, sizeof(MVMThreadContext));
    MVMint32 bigint_idx;

    /* Tie the context to its owning VM instance. */
    tc->instance = instance;

    /* GC nursery: only tospace is allocated up front; fromspace gets
     * allocated lazily the first time this thread GCs, if it ever does. */
    tc->nursery_tospace_size = MVM_gc_new_thread_nursery_size(instance);
    tc->nursery_tospace      = MVM_calloc(1, tc->nursery_tospace_size);
    tc->nursery_alloc        = tc->nursery_tospace;
    tc->nursery_alloc_limit  = (char *)tc->nursery_tospace + tc->nursery_tospace_size;

    /* Temporary GC root stack. */
    tc->num_temproots   = 0;
    tc->alloc_temproots = MVM_TEMP_ROOT_BASE_ALLOC;
    tc->temproots       = MVM_malloc(sizeof(MVMCollectable **) * tc->alloc_temproots);

    /* Inter-generational root list. */
    tc->num_gen2roots   = 0;
    tc->alloc_gen2roots = 64;
    tc->gen2roots       = MVM_malloc(sizeof(MVMCollectable *) * tc->alloc_gen2roots);

    /* Second generation (gen2) allocator. */
    tc->gen2 = MVM_gc_gen2_create(instance);

    /* The fixed size allocator also keeps per-thread state. */
    MVM_fixed_size_create_thread(tc);

    /* Initial call stack region for this thread. */
    MVM_callstack_region_init(tc);

    /* Seed this thread's random number generator state. */
    MVM_proc_seed(tc, (MVM_platform_now() / 10000) * MVM_proc_getpid(tc));

    /* Scratch big integers used by big integer operations. */
    for (bigint_idx = 0; bigint_idx < MVM_NUM_TEMP_BIGINTS; bigint_idx++) {
        tc->temp_bigints[bigint_idx] = MVM_malloc(sizeof(mp_int));
        mp_init(tc->temp_bigints[bigint_idx]);
    }

    /* Frame sequence numbers start from zero. */
    tc->next_frame_nr    = 0;
    tc->current_frame_nr = 0;

    /* Start last_payload and plugin_guard_args out as VMNull, so they are
     * never NULL and need no checks. These two assignments are repeated in
     * MVM_6model_bootstrap, because VMNull doesn't exist yet when the very
     * first thread context is created. */
    tc->last_payload      = instance->VMNull;
    tc->plugin_guard_args = instance->VMNull;

    return tc;
}
/* Initializes a new thread context. Note that this doesn't set up a
 * thread itself, it just creates the data structure that exists in
 * MoarVM per thread. */
MVMThreadContext * MVM_tc_create(MVMThreadContext *parent, MVMInstance *instance) {
    MVMThreadContext *tc = MVM_calloc(1, sizeof(MVMThreadContext));

    /* Associate with VM instance. */
    tc->instance = instance;

    /* Use default loop for main thread; create a new one for others.
     * NOTE(review): a non-NULL instance->main_thread appears to mean the
     * main thread already exists, so this branch creating a fresh loop is
     * taken for non-main threads — confirm against the code that first
     * sets instance->main_thread. */
    if (instance->main_thread) {
        int r;
        tc->loop = MVM_calloc(1, sizeof(uv_loop_t));
        r = uv_loop_init(tc->loop);
        if (r < 0) {
            /* Loop setup failed: free what we allocated (tc is unusable)
             * and report the libuv error on the parent's context. */
            MVM_free(tc->loop);
            MVM_free(tc);
            MVM_exception_throw_adhoc(parent, "Could not create a new Thread: %s", uv_strerror(r));
        }
    }
    else {
        tc->loop = uv_default_loop();
    }

    /* Set up GC nursery. We only allocate tospace initially, and allocate
     * fromspace the first time this thread GCs, provided it ever does. */
    tc->nursery_tospace     = MVM_calloc(1, MVM_NURSERY_SIZE);
    tc->nursery_alloc       = tc->nursery_tospace;
    tc->nursery_alloc_limit = (char *)tc->nursery_alloc + MVM_NURSERY_SIZE;

    /* Set up temporary root handling (roots pinned during allocation). */
    tc->num_temproots   = 0;
    tc->alloc_temproots = MVM_TEMP_ROOT_BASE_ALLOC;
    tc->temproots       = MVM_malloc(sizeof(MVMCollectable **) * tc->alloc_temproots);

    /* Set up intergenerational root handling. */
    tc->num_gen2roots   = 0;
    tc->alloc_gen2roots = 64;
    tc->gen2roots       = MVM_malloc(sizeof(MVMCollectable *) * tc->alloc_gen2roots);

    /* Set up the second generation allocator. */
    tc->gen2 = MVM_gc_gen2_create(instance);

    /* Allocate an initial call stack region for the thread. */
    MVM_callstack_region_init(tc);

    /* Initialize random number generator state. */
    MVM_proc_seed(tc, (MVM_platform_now() / 10000) * MVM_proc_getpid(tc));

    /* Initialize frame sequence numbers */
    tc->next_frame_nr    = 0;
    tc->current_frame_nr = 0;

    /* Initialize last_payload, so we can be sure it's never NULL and don't
     * need to check. */
    tc->last_payload = instance->VMNull;

    return tc;
}
/* Initializes a new thread context. Note that this doesn't set up a
 * thread itself, it just creates the data structure that exists in
 * MoarVM per thread.
 * NOTE(review): all calloc/malloc results below are used unchecked; an
 * allocation failure would dereference NULL. Verify whether this module's
 * policy is abort-on-OOM before adding checks. */
MVMThreadContext * MVM_tc_create(MVMInstance *instance) {
    MVMThreadContext *tc = calloc(1, sizeof(MVMThreadContext));

    /* Associate with VM instance. */
    tc->instance = instance;

    /* Set up GC nursery (both semispaces allocated eagerly here). */
    tc->nursery_fromspace   = calloc(1, MVM_NURSERY_SIZE);
    tc->nursery_tospace     = calloc(1, MVM_NURSERY_SIZE);
    tc->nursery_alloc       = tc->nursery_tospace;
    tc->nursery_alloc_limit = (char *)tc->nursery_alloc + MVM_NURSERY_SIZE;

    /* Set up temporary root handling. */
    tc->num_temproots   = 0;
    tc->alloc_temproots = 16;
    tc->temproots       = malloc(sizeof(MVMCollectable **) * tc->alloc_temproots);

    /* Set up intergenerational root handling. */
    tc->num_gen2roots   = 0;
    tc->alloc_gen2roots = 64;
    tc->gen2roots       = malloc(sizeof(MVMCollectable *) * tc->alloc_gen2roots);

    /* Set up the second generation allocator. */
    tc->gen2 = MVM_gc_gen2_create(instance);

    /* Set up table of per-static-frame chains.
     * XXX For non-first threads, make them start with the size of the
     * main thread's table. or, look into lazily initializing this. */
    tc->frame_pool_table_size = MVMInitialFramePoolTableSize;
    tc->frame_pool_table = calloc(MVMInitialFramePoolTableSize, sizeof(MVMFrame *));

    /* Presumably instance->default_loop being set means the default loop is
     * already claimed (by the main thread), so this thread gets its own.
     * NOTE(review): uv_loop_new() is deprecated in newer libuv in favour of
     * uv_loop_init() — confirm the libuv version this builds against. */
    tc->loop = instance->default_loop ? uv_loop_new() : uv_default_loop();

    /* Create a CallCapture for usecapture instructions in this thread (needs
     * special handling in initial thread as this runs before bootstrap). */
    if (instance->CallCapture)
        tc->cur_usecapture = MVM_repr_alloc_init(tc, instance->CallCapture);

    /* Initialize random number generator state. */
    MVM_proc_seed(tc, (MVM_platform_now() / 10000) * MVM_proc_getpid(tc));

#if MVM_HLL_PROFILE_CALLS
    /* Pre-size the call profiling buffer; profile_index tracks the next
     * free record slot. */
  #define PROFILE_INITIAL_SIZE (1 << 29)
    tc->profile_data_size = PROFILE_INITIAL_SIZE;
    tc->profile_data = malloc(sizeof(MVMProfileRecord) * PROFILE_INITIAL_SIZE);
    tc->profile_index = 0;
#endif

    return tc;
}
/* Returns the system time since the epoch as floating point seconds. */
MVMnum64 MVM_proc_time_n(MVMThreadContext *tc) {
    /* MVM_platform_now() reports nanoseconds; scale down to seconds. */
    const MVMnum64 ns_per_sec = 1000000000.0;
    return MVM_platform_now() / ns_per_sec;
}
/* Returns the system time since the epoch, truncated to integral seconds. */
MVMint64 MVM_proc_time_i(MVMThreadContext *tc) {
    /* MVM_platform_now() reports nanoseconds; integer division discards
     * the sub-second part. */
    const MVMint64 ns_per_sec = 1000000000;
    return (MVMint64)(MVM_platform_now() / ns_per_sec);
}
/* Removes a single frame, as part of a return or unwind. Done after any exit
 * handler has already been run. Returns 1 if control passed back to a caller
 * frame, 0 if there was no frame to return into (thread/entry done).
 * The 'unwind' flag selects which special hook (special_unwind vs
 * special_return) fires on the caller. */
static MVMuint64 remove_one_frame(MVMThreadContext *tc, MVMuint8 unwind) {
    MVMFrame *returner = tc->cur_frame;
    MVMFrame *caller   = returner->caller;

    /* Some cleanup we only need do if we're not a frame involved in a
     * continuation (otherwise we need to allow for multi-shot
     * re-invocation). */
    if (!returner->in_continuation) {
        /* Arguments buffer no longer in use (saves GC visiting it). */
        returner->cur_args_callsite = NULL;

        /* Clear up argument processing leftovers, if any. */
        if (returner->work) {
            MVM_args_proc_cleanup_for_cache(tc, &returner->params);
        }

        /* Clear up any continuation tags (plain linked-list walk-and-free). */
        if (returner->continuation_tags) {
            MVMContinuationTag *tag = returner->continuation_tags;
            while (tag) {
                MVMContinuationTag *next = tag->next;
                free(tag);
                tag = next;
            }
            returner->continuation_tags = NULL;
        }

        /* Signal to the GC to ignore ->work */
        returner->tc = NULL;

        /* Unless we need to keep the caller chain in place, clear it up. */
        if (caller) {
            if (!returner->keep_caller) {
                MVM_frame_dec_ref(tc, caller);
                returner->caller = NULL;
            }
            else if (unwind) {
                /* Propagate keep_caller up the chain during an unwind. */
                caller->keep_caller = 1;
            }
        }
    }

#if MVM_HLL_PROFILE_CALLS
    /* duration_nanos held the start timestamp (set at invoke time); turn it
     * into an elapsed duration now. */
    tc->profile_data[returner->profile_index].duration_nanos =
        MVM_platform_now() - tc->profile_data[returner->profile_index].duration_nanos;
#endif

    /* Decrement the frame's ref-count by the 1 it got by virtue of being the
     * currently executing frame. */
    MVM_frame_dec_ref(tc, returner);

    /* Switch back to the caller frame if there is one. */
    if (caller && returner != tc->thread_entry_frame) {
        tc->cur_frame = caller;
        *(tc->interp_cur_op)         = caller->return_address;
        *(tc->interp_bytecode_start) = caller->effective_bytecode;
        *(tc->interp_reg_base)       = caller->work;
        *(tc->interp_cu)             = caller->static_info->body.cu;

        /* Handle any special return hooks. The hook pointers are cleared
         * before invocation so a hook cannot fire twice even if it itself
         * causes frame removal. */
        if (caller->special_return || caller->special_unwind) {
            MVMSpecialReturn sr = caller->special_return;
            MVMSpecialReturn su = caller->special_unwind;
            caller->special_return = NULL;
            caller->special_unwind = NULL;
            if (unwind && su)
                su(tc, caller->special_return_data);
            else if (!unwind && sr)
                sr(tc, caller->special_return_data);
            caller->mark_special_return_data = NULL;
        }
        return 1;
    }
    else {
        tc->cur_frame = NULL;
        return 0;
    }
}
/* Takes a static frame and a thread context. Invokes the static frame. */ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame, MVMCallsite *callsite, MVMRegister *args, MVMFrame *outer, MVMObject *code_ref) { MVMFrame *frame; MVMuint32 pool_index, found_spesh; MVMFrame *node; int fresh = 0; MVMStaticFrameBody *static_frame_body = &static_frame->body; /* If the frame was never invoked before, need initial calculations * and verification. */ if (!static_frame_body->invoked) prepare_and_verify_static_frame(tc, static_frame); /* Get frame body from the re-use pool, or allocate it. */ pool_index = static_frame_body->pool_index; if (pool_index >= tc->frame_pool_table_size) grow_frame_pool(tc, pool_index); node = tc->frame_pool_table[pool_index]; if (node == NULL) { fresh = 1; frame = malloc(sizeof(MVMFrame)); frame->params.named_used = NULL; /* Ensure special return pointers and continuation tags are null. */ frame->special_return = NULL; frame->special_unwind = NULL; frame->continuation_tags = NULL; } else { tc->frame_pool_table[pool_index] = node->outer; node->outer = NULL; frame = node; } #if MVM_HLL_PROFILE_CALLS frame->profile_index = tc->profile_index; tc->profile_data[frame->profile_index].duration_nanos = MVM_platform_now(); tc->profile_data[frame->profile_index].callsite_id = 0; /* XXX get a real callsite id */ tc->profile_data[frame->profile_index].code_id = 0; /* XXX get a real code id */ /* increment the profile data index */ ++tc->profile_index; if (tc->profile_index == tc->profile_data_size) { tc->profile_data_size *= 2; tc->profile_data = realloc(tc->profile_data, tc->profile_data_size); } #endif /* Copy thread context (back?) into the frame. */ frame->tc = tc; /* Set static frame. */ frame->static_info = static_frame; /* Store the code ref (NULL at the top-level). */ frame->code_ref = code_ref; /* Allocate space for lexicals and work area, copying the default lexical * environment into place. 
*/ if (static_frame_body->env_size) { if (fresh) frame->env = malloc(static_frame_body->env_size); memcpy(frame->env, static_frame_body->static_env, static_frame_body->env_size); } else { frame->env = NULL; } if (static_frame_body->work_size) { if (fresh || !frame->work) frame->work = malloc(static_frame_body->work_size); memset(frame->work, 0, static_frame_body->work_size); } else { frame->work = NULL; } /* Calculate args buffer position and make sure current call site starts * empty. */ frame->args = static_frame_body->work_size ? frame->work + static_frame_body->num_locals : NULL; frame->cur_args_callsite = NULL; /* Outer. */ if (outer) { /* We were provided with an outer frame; just ensure that it is * based on the correct static frame (compare on bytecode address * to come with nqp::freshcoderef). */ if (outer->static_info->body.bytecode == static_frame_body->outer->body.bytecode) frame->outer = outer; else MVM_exception_throw_adhoc(tc, "When invoking %s, Provided outer frame %p (%s %s) does not match expected static frame type %p (%s %s)", static_frame_body->name ? MVM_string_utf8_encode_C_string(tc, static_frame_body->name) : "<anonymous static frame>", outer->static_info, MVM_repr_get_by_id(tc, REPR(outer->static_info)->ID)->name, outer->static_info->body.name ? MVM_string_utf8_encode_C_string(tc, outer->static_info->body.name) : "<anonymous static frame>", static_frame_body->outer, MVM_repr_get_by_id(tc, REPR(static_frame_body->outer)->ID)->name, static_frame_body->outer->body.name ? MVM_string_utf8_encode_C_string(tc, static_frame_body->outer->body.name) : "<anonymous static frame>"); } else if (static_frame_body->static_code && static_frame_body->static_code->body.outer) { /* We're lacking an outer, but our static code object may have one. * This comes up in the case of cloned protoregexes, for example. */ frame->outer = static_frame_body->static_code->body.outer; } else if (static_frame_body->outer) { /* Auto-close, and cache it in the static frame. 
*/ frame->outer = autoclose(tc, static_frame_body->outer); static_frame_body->static_code->body.outer = MVM_frame_inc_ref(tc, frame->outer); } else { frame->outer = NULL; } if (frame->outer) MVM_frame_inc_ref(tc, frame->outer); /* Caller is current frame in the thread context. */ if (tc->cur_frame) frame->caller = MVM_frame_inc_ref(tc, tc->cur_frame); else frame->caller = NULL; frame->keep_caller = 0; frame->in_continuation = 0; /* Initial reference count is 1 by virtue of it being the currently * executing frame. */ MVM_store(&frame->ref_count, 1); MVM_store(&frame->gc_seq_number, 0); /* Initialize argument processing. */ MVM_args_proc_init(tc, &frame->params, callsite, args); /* Make sure there's no frame context pointer and special return data * won't be marked. */ frame->context_object = NULL; frame->mark_special_return_data = NULL; /* Clear frame flags. */ frame->flags = 0; /* See if any specializations apply. */ found_spesh = 0; if (++static_frame_body->invocations >= 10 && callsite->is_interned) { /* Look for specialized bytecode. 
*/ MVMint32 num_spesh = static_frame_body->num_spesh_candidates; MVMint32 i, j; for (i = 0; i < num_spesh; i++) { MVMSpeshCandidate *cand = &static_frame_body->spesh_candidates[i]; if (cand->cs == callsite) { MVMint32 match = 1; for (j = 0; j < cand->num_guards; j++) { MVMint32 pos = cand->guards[j].slot; MVMSTable *st = (MVMSTable *)cand->guards[j].match; MVMObject *arg = args[pos].o; if (!arg) { match = 0; break; } switch (cand->guards[j].kind) { case MVM_SPESH_GUARD_CONC: if (!IS_CONCRETE(arg) || STABLE(arg) != st) match = 0; break; case MVM_SPESH_GUARD_TYPE: if (IS_CONCRETE(arg) || STABLE(arg) != st) match = 0; break; case MVM_SPESH_GUARD_DC_CONC: { MVMRegister dc; STABLE(arg)->container_spec->fetch(tc, arg, &dc); if (!dc.o || !IS_CONCRETE(dc.o) || STABLE(dc.o) != st) match = 0; break; } case MVM_SPESH_GUARD_DC_TYPE: { MVMRegister dc; STABLE(arg)->container_spec->fetch(tc, arg, &dc); if (!dc.o || IS_CONCRETE(dc.o) || STABLE(dc.o) != st) match = 0; break; } } if (!match) break; } if (match) { frame->effective_bytecode = cand->bytecode; frame->effective_handlers = cand->handlers; frame->effective_spesh_slots = cand->spesh_slots; frame->spesh_cand = cand; found_spesh = 1; break; } } } /* If we didn't find any, and we're below the limit, can generate a * specialization. */ if (!found_spesh && num_spesh < MVM_SPESH_LIMIT && tc->instance->spesh_enabled) { MVMSpeshCandidate *cand = MVM_spesh_candidate_generate(tc, static_frame, callsite, args); if (cand) { frame->effective_bytecode = cand->bytecode; frame->effective_handlers = cand->handlers; frame->effective_spesh_slots = cand->spesh_slots; frame->spesh_cand = cand; found_spesh = 1; } } } if (!found_spesh) { frame->effective_bytecode = static_frame_body->bytecode; frame->effective_handlers = static_frame_body->handlers; frame->spesh_cand = NULL; } /* Update interpreter and thread context, so next execution will use this * frame. 
*/ tc->cur_frame = frame; *(tc->interp_cur_op) = frame->effective_bytecode; *(tc->interp_bytecode_start) = frame->effective_bytecode; *(tc->interp_reg_base) = frame->work; *(tc->interp_cu) = static_frame_body->cu; /* If we need to do so, make clones of things in the lexical environment * that need it. Note that we do this after tc->cur_frame became the * current frame, to make sure these new objects will certainly get * marked if GC is triggered along the way. */ if (static_frame_body->static_env_flags) { /* Drag everything out of static_frame_body before we start, * as GC action may invalidate it. */ MVMuint8 *flags = static_frame_body->static_env_flags; MVMint64 numlex = static_frame_body->num_lexicals; MVMRegister *state = NULL; MVMint64 state_act = 0; /* 0 = none so far, 1 = first time, 2 = later */ MVMint64 i; for (i = 0; i < numlex; i++) { switch (flags[i]) { case 0: break; case 1: frame->env[i].o = MVM_repr_clone(tc, frame->env[i].o); break; case 2: redo_state: switch (state_act) { case 0: if (!frame->code_ref) MVM_exception_throw_adhoc(tc, "Frame must have code-ref to have state variables"); state = ((MVMCode *)frame->code_ref)->body.state_vars; if (state) { /* Already have state vars; pull them from this. */ state_act = 2; } else { /* Allocate storage for state vars. */ state = malloc(frame->static_info->body.env_size); memset(state, 0, frame->static_info->body.env_size); ((MVMCode *)frame->code_ref)->body.state_vars = state; state_act = 1; /* Note that this frame should run state init code. */ frame->flags |= MVM_FRAME_FLAG_STATE_INIT; } goto redo_state; case 1: frame->env[i].o = MVM_repr_clone(tc, frame->env[i].o); MVM_ASSIGN_REF(tc, &(frame->code_ref->header), state[i].o, frame->env[i].o); break; case 2: frame->env[i].o = state[i].o; break; } break; default: MVM_exception_throw_adhoc(tc, "Unknown lexical environment setup flag"); } } } }