static MVMint32 NFD_and_push_collation_values (MVMThreadContext *tc, MVMCodepoint cp, collation_stack *stack, MVMCodepointIter *ci, char *name) { MVMNormalizer norm; MVMCodepoint cp_out; MVMint32 ready, result_pos = 0; MVMCodepoint *result = MVM_malloc(sizeof(MVMCodepoint) * initial_collation_norm_buf_size); MVMint32 result_size = initial_collation_norm_buf_size; MVMint64 rtrn = 0; MVM_unicode_normalizer_init(tc, &norm, MVM_NORMALIZE_NFD); ready = MVM_unicode_normalizer_process_codepoint(tc, &norm, cp, &cp_out); if (ready) { if (result_size <= result_pos + ready) result = MVM_realloc(result, sizeof(MVMCodepoint) * (result_size += initial_collation_norm_buf_size)); result[result_pos++] = cp_out; while (0 < --ready) result[result_pos++] = MVM_unicode_normalizer_get_codepoint(tc, &norm); } MVM_unicode_normalizer_eof(tc, &norm); ready = MVM_unicode_normalizer_available(tc, &norm); while (ready--) { if (result_size <= result_pos + ready + 1) result = MVM_realloc(result, sizeof(MVMCodepoint) * (result_size += initial_collation_norm_buf_size)); result[result_pos++] = MVM_unicode_normalizer_get_codepoint(tc, &norm); } /* If the codepoint changed or we now have more than before */ if (result[0] != cp || 1 < result_pos) rtrn = collation_push_cp(tc, stack, ci, result, result_pos, name); if (result) MVM_free(result); return rtrn; }
MVM_STATIC_INLINE void maybe_grow_result(MVMCodepoint **result, MVMint64 *result_alloc, MVMint64 needed) { if (needed >= *result_alloc) { while (needed >= *result_alloc) *result_alloc += 32; *result = MVM_realloc(*result, *result_alloc * sizeof(MVMCodepoint)); } }
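/* The helper above grows the NFD result buffer in fixed 32-element steps until
 * the requested index fits. Below is a minimal standalone sketch of the same
 * pattern using plain realloc; the names (grow_to_fit, out, pos) are
 * illustrative only, not MoarVM API, and realloc failure handling is omitted
 * just as in the original. */

#include <stdint.h>
#include <stdlib.h>

/* Grow *buf (in int32_t elements) in fixed 32-element steps until index
 * `needed` is valid. */
static void grow_to_fit(int32_t **buf, int64_t *alloc, int64_t needed) {
    if (needed >= *alloc) {
        while (needed >= *alloc)
            *alloc += 32;
        *buf = realloc(*buf, (size_t)*alloc * sizeof(int32_t));
    }
}

int main(void) {
    int32_t *out   = NULL;
    int64_t  alloc = 0, pos = 0;
    int      i;
    for (i = 0; i < 100; i++) {      /* 100 writes force several grow steps */
        grow_to_fit(&out, &alloc, pos);
        out[pos++] = i;
    }
    free(out);
    return 0;
}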
/* Log that we're entering a new frame. */ void MVM_profile_log_enter(MVMThreadContext *tc, MVMStaticFrame *sf, MVMuint64 mode) { MVMProfileThreadData *ptd = get_thread_data(tc); /* Try to locate the entry node, if it's in the call graph already. */ MVMProfileCallNode *pcn = NULL; MVMuint32 i; if (ptd->current_call) for (i = 0; i < ptd->current_call->num_succ; i++) if (ptd->current_call->succ[i]->sf == sf) pcn = ptd->current_call->succ[i]; /* If we didn't find a call graph node, then create one and add it to the * graph. */ if (!pcn) { pcn = MVM_calloc(1, sizeof(MVMProfileCallNode)); pcn->sf = sf; if (ptd->current_call) { MVMProfileCallNode *pred = ptd->current_call; pcn->pred = pred; if (pred->num_succ == pred->alloc_succ) { pred->alloc_succ += 8; pred->succ = MVM_realloc(pred->succ, pred->alloc_succ * sizeof(MVMProfileCallNode *)); } pred->succ[pred->num_succ] = pcn; pred->num_succ++; } else { if (!ptd->call_graph) ptd->call_graph = pcn; } } /* Increment entry counts. */ pcn->total_entries++; switch (mode) { case MVM_PROFILE_ENTER_SPESH: pcn->specialized_entries++; break; case MVM_PROFILE_ENTER_SPESH_INLINE: pcn->specialized_entries++; pcn->inlined_entries++; break; case MVM_PROFILE_ENTER_JIT: pcn->jit_entries++; break; case MVM_PROFILE_ENTER_JIT_INLINE: pcn->jit_entries++; pcn->inlined_entries++; break; } pcn->entry_mode = mode; /* Log entry time; clear skip time. */ pcn->cur_entry_time = uv_hrtime(); pcn->cur_skip_time = 0; /* The current call graph node becomes this one. */ ptd->current_call = pcn; }
MVMint64 MVM_io_syncstream_write_str(MVMThreadContext *tc, MVMOSHandle *h, MVMString *str, MVMint64 newline) { MVMIOSyncStreamData *data = (MVMIOSyncStreamData *)h->body.data; char *output; MVMuint64 output_size; uv_write_t *req; uv_buf_t write_buf; int r; output = MVM_string_encode(tc, str, 0, -1, &output_size, data->encoding); if (newline) { output = (char *)MVM_realloc(output, ++output_size); output[output_size - 1] = '\n'; } req = MVM_malloc(sizeof(uv_write_t)); write_buf = uv_buf_init(output, output_size); uv_ref((uv_handle_t *)data->handle); if ((r = uv_write(req, data->handle, &write_buf, 1, write_cb)) < 0) { uv_unref((uv_handle_t *)data->handle); MVM_free(req); MVM_free(output); MVM_exception_throw_adhoc(tc, "Failed to write string to stream: %s", uv_strerror(r)); } else { uv_run(tc->loop, UV_RUN_DEFAULT); MVM_free(output); } data->total_bytes_written += output_size; return output_size; }
MVMObject * references_str(MVMThreadContext *tc, MVMHeapSnapshot *s) { /* Produces ; separated sequences of: * kind,idx,to * All of which are integers. */ MVMObject *result; size_t buffer_size = 10 * s->num_references; size_t buffer_pos = 0; char *buffer = MVM_malloc(buffer_size); MVMuint64 i; for (i = 0; i < s->num_references; i++) { char tmp[128]; int item_chars = snprintf(tmp, 128, "%"PRIu64",%"PRIu64",%"PRIu64";", s->references[i].description & ((1 << MVM_SNAPSHOT_REF_KIND_BITS) - 1), s->references[i].description >> MVM_SNAPSHOT_REF_KIND_BITS, s->references[i].collectable_index); if (item_chars < 0) MVM_panic(1, "Failed to save reference in heap snapshot"); if (buffer_pos + item_chars >= buffer_size) { buffer_size += 4096; buffer = MVM_realloc(buffer, buffer_size); } memcpy(buffer + buffer_pos, tmp, item_chars); buffer_pos += item_chars; } if (buffer_pos > 1) buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */ buffer[buffer_pos] = 0; result = box_s(tc, vmstr(tc, buffer)); MVM_free(buffer); return result; }
MVMObject * types_str(MVMThreadContext *tc, MVMHeapSnapshotCollection *col) { /* Produces ; separated sequences of: * repr_string_index,type_name_string_index * Both of which are integers. */ MVMObject *result; size_t buffer_size = 10 * col->num_types; size_t buffer_pos = 0; char *buffer = MVM_malloc(buffer_size); MVMuint64 i; for (i = 0; i < col->num_types; i++) { char tmp[256]; int item_chars = snprintf(tmp, 256, "%"PRIu64",%"PRIu64";", col->types[i].repr_name, col->types[i].type_name); if (item_chars < 0) MVM_panic(1, "Failed to save type in heap snapshot"); if (buffer_pos + item_chars >= buffer_size) { buffer_size += 4096; buffer = MVM_realloc(buffer, buffer_size); } memcpy(buffer + buffer_pos, tmp, item_chars); buffer_pos += item_chars; } if (buffer_pos > 1) buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */ buffer[buffer_pos] = 0; result = box_s(tc, vmstr(tc, buffer)); MVM_free(buffer); return result; }
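/* references_str and types_str (and collectables_str further down) share one
 * shape: snprintf a record into a small stack buffer, grow the heap buffer by
 * a fixed 4096-byte chunk if the record would overflow it, memcpy the record
 * in, and finally trim the trailing ';'. A simplified standalone version of
 * that loop, with hypothetical names and plain malloc/realloc: */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a ';'-separated list of "a,b" pairs, growing the output buffer in
 * 4096-byte chunks as records are appended. */
static char *pairs_str(const unsigned *a, const unsigned *b, size_t n) {
    size_t size = n ? 10 * n : 16;    /* rough initial guess, as above */
    size_t pos  = 0;
    char  *buf  = malloc(size);
    size_t i;
    for (i = 0; i < n; i++) {
        char tmp[64];
        int  len = snprintf(tmp, sizeof tmp, "%u,%u;", a[i], b[i]);
        if (len < 0)
            abort();
        if (pos + (size_t)len >= size) {
            size += 4096;
            buf = realloc(buf, size);
        }
        memcpy(buf + pos, tmp, (size_t)len);
        pos += (size_t)len;
    }
    if (pos > 0)
        pos--;                        /* cut off the trailing ';' */
    buf[pos] = '\0';
    return buf;
}

int main(void) {
    unsigned a[] = { 1, 2, 3 }, b[] = { 10, 20, 30 };
    char *s = pairs_str(a, b, 3);
    puts(s);                          /* prints "1,10;2,20;3,30" */
    free(s);
    return 0;
}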
/* Grows storage if it's full, zeroing the extension. Assumes it's only being * grown for one more item. */ static void grow_storage(void *store_ptr, MVMuint64 *num, MVMuint64 *alloc, size_t size) { void **store = (void **)store_ptr; if (*num == *alloc) { *alloc = *alloc ? 2 * *alloc : 32; *store = MVM_realloc(*store, *alloc * size); memset(((char *)*store) + *num * size, 0, (*alloc - *num) * size); } }
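/* grow_storage doubles the allocation (starting at 32 entries) and zeroes only
 * the newly added tail, so callers can rely on fresh slots being cleared. A
 * standalone analogue follows, shown with the usual grow-then-write-slot call
 * sequence; grow_zeroed and the demo are illustrative names, and the void *
 * interface mirrors the original. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Double the backing store (or start at 32 entries) and zero the new tail. */
static void grow_zeroed(void *store_ptr, uint64_t *num, uint64_t *alloc, size_t size) {
    void **store = (void **)store_ptr;
    if (*num == *alloc) {
        *alloc = *alloc ? 2 * *alloc : 32;
        *store = realloc(*store, *alloc * size);
        memset((char *)*store + *num * size, 0, (*alloc - *num) * size);
    }
}

int main(void) {
    uint64_t *items = NULL;
    uint64_t  num = 0, alloc = 0, i;
    for (i = 0; i < 1000; i++) {
        grow_zeroed(&items, &num, &alloc, sizeof(uint64_t));
        items[num++] = i;             /* slot was zeroed before being written */
    }
    free(items);
    return 0;
}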
/* Log that we've just allocated the passed object (just log the type). */ void MVM_profile_log_allocated(MVMThreadContext *tc, MVMObject *obj) { MVMProfileThreadData *ptd = get_thread_data(tc); MVMProfileCallNode *pcn = ptd->current_call; if (pcn) { /* First, let's see if the allocation is actually at the end of the * nursery; we may have generated some "allocated" log instructions * after operations that may or may not allocate what they return. */ MVMuint32 distance = ((MVMuint64)tc->nursery_alloc - (MVMuint64)obj); if (!obj) { return; } /* Since some ops first allocate, then call something else that may * also allocate, we may have to allow for a bit of grace distance. */ if ((MVMuint64)obj > (MVMuint64)tc->nursery_tospace && distance <= obj->header.size && obj != ptd->last_counted_allocation) { /* See if there's an existing node to update. */ MVMObject *what = STABLE(obj)->WHAT; MVMuint32 i; MVMuint8 allocation_target; if (pcn->entry_mode == MVM_PROFILE_ENTER_SPESH || pcn->entry_mode == MVM_PROFILE_ENTER_SPESH_INLINE) { allocation_target = 1; } else if (pcn->entry_mode == MVM_PROFILE_ENTER_JIT || pcn->entry_mode == MVM_PROFILE_ENTER_JIT_INLINE) { allocation_target = 2; } else { allocation_target = 0; } for (i = 0; i < pcn->num_alloc; i++) { if (pcn->alloc[i].type == what) { if (allocation_target == 0) pcn->alloc[i].allocations_interp++; else if (allocation_target == 1) pcn->alloc[i].allocations_spesh++; else if (allocation_target == 2) pcn->alloc[i].allocations_jit++; ptd->last_counted_allocation = obj; return; } } /* No entry; create one. */ if (pcn->num_alloc == pcn->alloc_alloc) { pcn->alloc_alloc += 8; pcn->alloc = MVM_realloc(pcn->alloc, pcn->alloc_alloc * sizeof(MVMProfileAllocationCount)); } pcn->alloc[pcn->num_alloc].type = what; pcn->alloc[pcn->num_alloc].allocations_interp = allocation_target == 0; pcn->alloc[pcn->num_alloc].allocations_spesh = allocation_target == 1; pcn->alloc[pcn->num_alloc].allocations_jit = allocation_target == 2; ptd->last_counted_allocation = obj; pcn->num_alloc++; } } }
/* Tries to intern the callsite, freeing and updating the one passed in and * replacing it with an already interned one if we find it. */ MVM_PUBLIC void MVM_callsite_try_intern(MVMThreadContext *tc, MVMCallsite **cs_ptr) { MVMCallsiteInterns *interns = tc->instance->callsite_interns; MVMCallsite *cs = *cs_ptr; MVMint32 num_flags = cs->flag_count; MVMint32 num_nameds = MVM_callsite_num_nameds(tc, cs); MVMint32 i, found; /* Can't intern anything with flattening. */ if (cs->has_flattening) return; /* Also can't intern past the max arity. */ if (num_flags >= MVM_INTERN_ARITY_LIMIT) return; /* Can intern things with nameds, provided we know the names. */ if (num_nameds > 0 && !cs->arg_names) return; /* Obtain mutex protecting interns store. */ uv_mutex_lock(&tc->instance->mutex_callsite_interns); /* Search for a match. */ found = 0; for (i = 0; i < interns->num_by_arity[num_flags]; i++) { if (callsites_equal(tc, interns->by_arity[num_flags][i], cs, num_flags, num_nameds)) { /* Got a match! Free the one we were passed and replace it with * the interned one. */ if (num_flags) MVM_free(cs->arg_flags); MVM_free(cs->arg_names); MVM_free(cs); *cs_ptr = interns->by_arity[num_flags][i]; found = 1; break; } } /* If it wasn't found, store it for the future. */ if (!found) { if (interns->num_by_arity[num_flags] % 8 == 0) { if (interns->num_by_arity[num_flags]) interns->by_arity[num_flags] = MVM_realloc( interns->by_arity[num_flags], sizeof(MVMCallsite *) * (interns->num_by_arity[num_flags] + 8)); else interns->by_arity[num_flags] = MVM_malloc(sizeof(MVMCallsite *) * 8); } interns->by_arity[num_flags][interns->num_by_arity[num_flags]++] = cs; cs->is_interned = 1; } /* Finally, release mutex. */ uv_mutex_unlock(&tc->instance->mutex_callsite_interns); }
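/* The intern store grows each arity bucket in blocks of eight, reallocating
 * only when the current count is a multiple of eight. A standalone sketch of
 * that scheme; block8_append is a hypothetical name and no locking is shown. */

#include <stdlib.h>

/* Append a pointer to a table grown in blocks of eight entries. */
static void block8_append(void ***table, size_t *count, void *item) {
    if (*count % 8 == 0)
        *table = *count ? realloc(*table, (*count + 8) * sizeof(void *))
                        : malloc(8 * sizeof(void *));
    (*table)[(*count)++] = item;
}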
static void append(DumpStr *ds, char *to_add) { size_t len = strlen(to_add); if (ds->pos + len >= ds->alloc) { ds->alloc *= 4; if (ds->pos + len >= ds->alloc) ds->alloc += len; ds->buffer = MVM_realloc(ds->buffer, ds->alloc); } memcpy(ds->buffer + ds->pos, to_add, len); ds->pos += len; }
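/* append() quadruples the dump buffer and, if one very large addition still
 * would not fit, tops the allocation up by the string's length. A simplified
 * standalone string builder using the same policy; StrBuf and strbuf_append
 * are illustrative names, and as in the original the buffer must start
 * pre-allocated (alloc > 0) and is not NUL-terminated on each append. */

#include <stdlib.h>
#include <string.h>

typedef struct {
    char  *buffer;
    size_t alloc;    /* must be non-zero before the first append */
    size_t pos;
} StrBuf;

/* Quadruple the allocation; if a single huge append still does not fit,
 * top the allocation up by the append length. */
static void strbuf_append(StrBuf *sb, const char *to_add) {
    size_t len = strlen(to_add);
    if (sb->pos + len >= sb->alloc) {
        sb->alloc *= 4;
        if (sb->pos + len >= sb->alloc)
            sb->alloc += len;
        sb->buffer = realloc(sb->buffer, sb->alloc);
    }
    memcpy(sb->buffer + sb->pos, to_add, len);
    sb->pos += len;
}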
static void push_key_to_stack(collation_stack *stack, MVMuint32 primary, MVMuint32 secondary, MVMuint32 tertiary) { stack->stack_top++; if (stack->stack_size <= stack->stack_top) { stack->keys = MVM_realloc(stack->keys, (stack->stack_size + initial_stack_size) * sizeof(collation_stack)); stack->stack_size += initial_stack_size; } stack->keys[stack->stack_top].s.primary = primary; stack->keys[stack->stack_top].s.secondary = secondary; stack->keys[stack->stack_top].s.tertiary = tertiary; }
/* Adds an object to the per-thread list of those being finalized in this GC * run, growing that list as needed. Assumes the world is stopped. */ static void add_to_finalizing(MVMThreadContext *tc, MVMObject *obj) { if (tc->num_finalizing == tc->alloc_finalizing) { if (tc->alloc_finalizing) tc->alloc_finalizing *= 2; else tc->alloc_finalizing = 64; tc->finalizing = MVM_realloc(tc->finalizing, sizeof(MVMCollectable **) * tc->alloc_finalizing); } tc->finalizing[tc->num_finalizing] = obj; tc->num_finalizing++; }
/* Adds an object we've just allocated to the queue of those with finalizers * that will need calling upon collection. */ void MVM_gc_finalize_add_to_queue(MVMThreadContext *tc, MVMObject *obj) { if (tc->num_finalize == tc->alloc_finalize) { if (tc->alloc_finalize) tc->alloc_finalize *= 2; else tc->alloc_finalize = 64; tc->finalize = MVM_realloc(tc->finalize, sizeof(MVMCollectable **) * tc->alloc_finalize); } tc->finalize[tc->num_finalize] = obj; tc->num_finalize++; }
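/* Both finalization lists grow by doubling from an initial 64 entries. The
 * small standalone count below illustrates why that gives amortized O(1)
 * pushes: across n pushes the reallocations move fewer than 2n elements in
 * total (an upper bound, since realloc may also grow in place). Pure
 * arithmetic, no MoarVM types involved. */

#include <stdio.h>

int main(void) {
    size_t n = 1000000, num = 0, alloc = 0, copies = 0, i;
    for (i = 0; i < n; i++) {
        if (num == alloc) {
            copies += num;                    /* realloc may move `num` items */
            alloc = alloc ? alloc * 2 : 64;   /* same policy as above */
        }
        num++;
    }
    printf("%zu pushes, at most %zu element copies (%.2f per push)\n",
           n, copies, (double)copies / (double)n);
    return 0;
}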
/* Adds a location holding a collectable object to the permanent list of GC * roots, so that it will always be marked and never die. Note that the * address of the collectable must be passed, since it will need to be * updated. */ void MVM_gc_root_add_permanent_desc(MVMThreadContext *tc, MVMCollectable **obj_ref, char *description) { if (obj_ref == NULL) MVM_panic(MVM_exitcode_gcroots, "Illegal attempt to add null object address as a permanent root"); uv_mutex_lock(&tc->instance->mutex_permroots); /* Allocate extra permanent root space if needed. */ if (tc->instance->num_permroots == tc->instance->alloc_permroots) { tc->instance->alloc_permroots *= 2; tc->instance->permroots = MVM_realloc(tc->instance->permroots, sizeof(MVMCollectable **) * tc->instance->alloc_permroots); tc->instance->permroot_descriptions = MVM_realloc( tc->instance->permroot_descriptions, sizeof(char *) * tc->instance->alloc_permroots); } /* Add this one to the list. */ tc->instance->permroots[tc->instance->num_permroots] = obj_ref; tc->instance->permroot_descriptions[tc->instance->num_permroots] = description; tc->instance->num_permroots++; uv_mutex_unlock(&tc->instance->mutex_permroots); }
/* Called when we take a continuation. Leaves the static frames from the point * of view of the profiler, and saves each of them. */ MVMProfileContinuationData * MVM_profile_log_continuation_control(MVMThreadContext *tc, MVMFrame *root_frame) { MVMProfileThreadData *ptd = get_thread_data(tc); MVMProfileContinuationData *cd = MVM_malloc(sizeof(MVMProfileContinuationData)); MVMStaticFrame **sfs = NULL; MVMuint64 *modes = NULL; MVMFrame *cur_frame = tc->cur_frame; MVMuint64 alloc_sfs = 0; MVMuint64 num_sfs = 0; MVMFrame *last_frame; do { MVMProfileCallNode *lpcn; do { MVMProfileCallNode *pcn = ptd->current_call; if (!pcn) MVM_panic(1, "Profiler lost sequence in continuation control"); if (num_sfs == alloc_sfs) { alloc_sfs += 16; sfs = MVM_realloc(sfs, alloc_sfs * sizeof(MVMStaticFrame *)); modes = MVM_realloc(modes, alloc_sfs * sizeof(MVMuint64)); } sfs[num_sfs] = pcn->sf; modes[num_sfs] = pcn->entry_mode; num_sfs++; lpcn = pcn; log_exit(tc, 1); } while (lpcn->sf != cur_frame->static_info); last_frame = cur_frame; cur_frame = cur_frame->caller; } while (last_frame != root_frame); cd->sfs = sfs; cd->num_sfs = num_sfs; cd->modes = modes; return cd; }
/* Log that we've entered a native routine */ void MVM_profile_log_enter_native(MVMThreadContext *tc, MVMObject *nativecallsite) { MVMProfileThreadData *ptd = get_thread_data(tc); MVMProfileCallNode *pcn = NULL; MVMNativeCallBody *callbody; MVMuint32 i; /* We locate the right call node by looking at sf being NULL and the * native_target_name matching our intended target. */ callbody = MVM_nativecall_get_nc_body(tc, nativecallsite); if (ptd->current_call) for (i = 0; i < ptd->current_call->num_succ; i++) if (ptd->current_call->succ[i]->sf == NULL) if (strcmp(callbody->sym_name, ptd->current_call->succ[i]->native_target_name) == 0) { pcn = ptd->current_call->succ[i]; break; } /* If we didn't find a call graph node, then create one and add it to the * graph. */ if (!pcn) { pcn = MVM_calloc(1, sizeof(MVMProfileCallNode)); pcn->native_target_name = callbody->sym_name; if (ptd->current_call) { MVMProfileCallNode *pred = ptd->current_call; pcn->pred = pred; if (pred->num_succ == pred->alloc_succ) { pred->alloc_succ += 8; pred->succ = MVM_realloc(pred->succ, pred->alloc_succ * sizeof(MVMProfileCallNode *)); } pred->succ[pred->num_succ] = pcn; pred->num_succ++; } else { if (!ptd->call_graph) ptd->call_graph = pcn; } } /* Increment entry counts. */ pcn->total_entries++; pcn->entry_mode = 0; /* Log entry time; clear skip time. */ pcn->cur_entry_time = uv_hrtime(); pcn->cur_skip_time = 0; /* The current call graph node becomes this one. */ ptd->current_call = pcn; }
/* Resizes the handlers table, making a copy if needed. */ static void resize_handlers_table(MVMThreadContext *tc, MVMSpeshGraph *inliner, MVMuint32 new_handler_count) { if (inliner->handlers == inliner->sf->body.handlers) { /* Original handlers table; need a copy. */ MVMFrameHandler *new_handlers = MVM_malloc(new_handler_count * sizeof(MVMFrameHandler)); memcpy(new_handlers, inliner->handlers, inliner->num_handlers * sizeof(MVMFrameHandler)); inliner->handlers = new_handlers; } else { /* Probably already did some inlines into this frame; resize. */ inliner->handlers = MVM_realloc(inliner->handlers, new_handler_count * sizeof(MVMFrameHandler)); } }
/* If we have the job of doing GC for a thread, we add it to our work * list. */ static void add_work(MVMThreadContext *tc, MVMThreadContext *stolen) { MVMint32 i; for (i = 0; i < tc->gc_work_count; i++) if (tc->gc_work[i].tc == stolen) return; if (tc->gc_work == NULL) { tc->gc_work_size = 16; tc->gc_work = MVM_malloc(tc->gc_work_size * sizeof(MVMWorkThread)); } else if (tc->gc_work_count == tc->gc_work_size) { tc->gc_work_size *= 2; tc->gc_work = MVM_realloc(tc->gc_work, tc->gc_work_size * sizeof(MVMWorkThread)); } tc->gc_work[tc->gc_work_count++].tc = stolen; }
/* Logs the start of a GC run. */ void MVM_profiler_log_gc_start(MVMThreadContext *tc, MVMuint32 full) { MVMProfileThreadData *ptd = get_thread_data(tc); /* Make a new entry in the GCs. We use the cleared_bytes to store the * maximum that could be cleared, and after GC is done will subtract * retained bytes and promoted bytes. */ if (ptd->num_gcs == ptd->alloc_gcs) { ptd->alloc_gcs += 16; ptd->gcs = MVM_realloc(ptd->gcs, ptd->alloc_gcs * sizeof(MVMProfileGC)); } ptd->gcs[ptd->num_gcs].full = full; ptd->gcs[ptd->num_gcs].cleared_bytes = (char *)tc->nursery_alloc - (char *)tc->nursery_tospace; /* Record start time. */ ptd->cur_gc_start_time = uv_hrtime(); }
/* Adds a codepoint into the buffer, making sure there's space. */ static void add_codepoint_to_buffer(MVMThreadContext *tc, MVMNormalizer *n, MVMCodepoint cp) { if (n->buffer_end == n->buffer_size) { if (n->buffer_start != 0) { MVMint32 shuffle = n->buffer_start; MVMint32 to_move = n->buffer_end - n->buffer_start; memmove(n->buffer, n->buffer + n->buffer_start, to_move * sizeof(MVMCodepoint)); n->buffer_start = 0; n->buffer_end -= shuffle; n->buffer_norm_end -= shuffle; } else { n->buffer_size *= 2; n->buffer = MVM_realloc(n->buffer, n->buffer_size * sizeof(MVMCodepoint)); } } n->buffer[n->buffer_end++] = cp; }
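/* The normalizer's buffer is compacted before it is grown: when the write end
 * hits the allocation, already-consumed codepoints at the front are shuffled
 * out with memmove, and only a genuinely full buffer is doubled. A standalone
 * sketch of that compact-before-grow policy; Window/window_push are
 * hypothetical names, the buffer_norm_end bookkeeping is omitted, and the
 * structure is assumed to start with a non-zero size, as the normalizer's
 * buffer does. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    int32_t *buf;
    int32_t  size;   /* allocated elements           */
    int32_t  start;  /* first unconsumed element     */
    int32_t  end;    /* one past last stored element */
} Window;

/* Add one element, reclaiming consumed space at the front before resorting
 * to doubling the allocation. */
static void window_push(Window *w, int32_t cp) {
    if (w->end == w->size) {
        if (w->start != 0) {
            int32_t shuffle = w->start;
            int32_t to_move = w->end - w->start;
            memmove(w->buf, w->buf + w->start, to_move * sizeof(int32_t));
            w->start = 0;
            w->end -= shuffle;
        }
        else {
            w->size *= 2;
            w->buf = realloc(w->buf, w->size * sizeof(int32_t));
        }
    }
    w->buf[w->end++] = cp;
}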
static void append_string(char **out, MVMuint32 *size, MVMuint32 *length, char *str, ...) { char string[line_length]; MVMuint32 len; va_list args; va_start(args, str); vsnprintf(string, line_length, str, args); va_end(args); len = strlen(string); if (*length + len > *size) { while (*length + len > *size) *size = *size * 2; *out = MVM_realloc(*out, *size); } memcpy(*out + *length, string, len); *length = *length + len; }
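/* append_string formats into a fixed stack buffer with vsnprintf, doubles
 * *size until the formatted text fits, then memcpys it in. The standalone
 * sketch below has the same shape; LINE_LENGTH stands in for the original
 * line_length, and *size is assumed to start non-zero (the original relies on
 * the same, or the doubling loop would never terminate). */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LINE_LENGTH 1024

/* printf-style append into a growable, non-NUL-terminated buffer. */
static void append_fmt(char **out, unsigned *size, unsigned *length, const char *fmt, ...) {
    char     tmp[LINE_LENGTH];
    unsigned len;
    va_list  args;
    va_start(args, fmt);
    vsnprintf(tmp, sizeof tmp, fmt, args);
    va_end(args);
    len = (unsigned)strlen(tmp);
    if (*length + len > *size) {
        while (*length + len > *size)
            *size = *size * 2;
        *out = realloc(*out, *size);
    }
    memcpy(*out + *length, tmp, len);
    *length = *length + len;
}

int main(void) {
    unsigned size = 64, length = 0;
    char *out = malloc(size);
    int   i;
    for (i = 0; i < 100; i++)
        append_fmt(&out, &size, &length, "line %d\n", i);
    fwrite(out, 1, length, stdout);
    free(out);
    return 0;
}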
/* Adds work to list of items to pass over to another thread, and if we * reach the pass threshold then does the passing. */ static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp, MVMCollectable **item_ptr) { ThreadWork *target_info = NULL; MVMuint32 target = (*item_ptr)->owner; MVMuint32 j; MVMInstance *i = tc->instance; /* Find any existing thread work passing list for the target. */ if (target == 0) MVM_panic(MVM_exitcode_gcnursery, "Internal error: zeroed target thread ID in work pass"); for (j = 0; j < wtp->num_target_threads; j++) { if (wtp->target_work[j].target == target) { target_info = &wtp->target_work[j]; break; } } /* If there's no entry for this target, create one. */ if (target_info == NULL) { wtp->num_target_threads++; wtp->target_work = MVM_realloc(wtp->target_work, wtp->num_target_threads * sizeof(ThreadWork)); target_info = &wtp->target_work[wtp->num_target_threads - 1]; target_info->target = target; target_info->work = NULL; } /* See if there's a currently active list; create it if not. */ if (!target_info->work) { target_info->work = calloc(sizeof(MVMGCPassedWork), 1); } /* Add this item to the work list. */ target_info->work->items[target_info->work->num_items] = item_ptr; target_info->work->num_items++; /* If we've hit the limit, pass this work to the target thread. */ if (target_info->work->num_items == MVM_GC_PASS_WORK_SIZE) { push_work_to_thread_in_tray(tc, target, target_info->work); target_info->work = NULL; } }
/* Records a de-optimization annotation and mapping pair. */ static void add_deopt_annotation(MVMThreadContext *tc, MVMSpeshGraph *g, MVMSpeshIns *ins_node, MVMuint8 *pc, MVMint32 type) { /* Add the annotation. */ MVMSpeshAnn *ann = MVM_spesh_alloc(tc, g, sizeof(MVMSpeshAnn)); ann->type = type; ann->data.deopt_idx = g->num_deopt_addrs; ann->next = ins_node->annotations; ins_node->annotations = ann; /* Record PC in the deopt entries table. */ if (g->num_deopt_addrs == g->alloc_deopt_addrs) { g->alloc_deopt_addrs += 4; if (g->deopt_addrs) g->deopt_addrs = MVM_realloc(g->deopt_addrs, g->alloc_deopt_addrs * sizeof(MVMint32) * 2); else g->deopt_addrs = MVM_malloc(g->alloc_deopt_addrs * sizeof(MVMint32) * 2); } g->deopt_addrs[2 * g->num_deopt_addrs] = pc - g->bytecode; g->num_deopt_addrs++; }
MVMObject * collectables_str(MVMThreadContext *tc, MVMHeapSnapshot *s) { /* Produces ; separated sequences of: * kind,type_or_frame_index,collectable_size,unmanaged_size,refs_start,num_refs * All of which are integers. */ MVMObject *result; size_t buffer_size = 20 * s->num_collectables; size_t buffer_pos = 0; char *buffer = MVM_malloc(buffer_size); MVMuint64 i; for (i = 0; i < s->num_collectables; i++) { char tmp[256]; int item_chars = snprintf(tmp, 256, "%"PRIu16",%"PRId32",%"PRIu16",%"PRIu64",%"PRIu64",%"PRIu32";", s->collectables[i].kind, s->collectables[i].type_or_frame_index, s->collectables[i].collectable_size, s->collectables[i].unmanaged_size, s->collectables[i].num_refs ? s->collectables[i].refs_start : (MVMuint64)0, s->collectables[i].num_refs); if (item_chars < 0) MVM_panic(1, "Failed to save collectable in heap snapshot"); if (buffer_pos + item_chars >= buffer_size) { buffer_size += 4096; buffer = MVM_realloc(buffer, buffer_size); } memcpy(buffer + buffer_pos, tmp, item_chars); buffer_pos += item_chars; } if (buffer_pos > 1) buffer[buffer_pos - 1] = 0; /* Cut off the trailing ; for ease of parsing */ buffer[buffer_pos] = 0; result = box_s(tc, vmstr(tc, buffer)); MVM_free(buffer); return result; }
/* Ensures that a given compilation unit has access to the specified extop. */ static void demand_extop(MVMThreadContext *tc, MVMCompUnit *target_cu, MVMCompUnit *source_cu, const MVMOpInfo *info) { MVMExtOpRecord *extops; MVMuint16 i, num_extops; MVM_reentrantmutex_lock(tc, (MVMReentrantMutex *)target_cu->body.update_mutex); /* See if the target compunit already has the extop. */ extops = target_cu->body.extops; num_extops = target_cu->body.num_extops; for (i = 0; i < num_extops; i++) if (extops[i].info == info) { MVM_reentrantmutex_unlock(tc, (MVMReentrantMutex *)target_cu->body.update_mutex); return; } /* If not, need to add it. Locate it in the source CU. */ extops = source_cu->body.extops; num_extops = source_cu->body.num_extops; for (i = 0; i < num_extops; i++) { if (extops[i].info == info) { MVMuint32 size = (target_cu->body.num_extops + 1) * sizeof(MVMExtOpRecord); target_cu->body.extops = target_cu->body.extops ? MVM_realloc(target_cu->body.extops, size) : MVM_malloc(size); memcpy(&target_cu->body.extops[target_cu->body.num_extops], &extops[i], sizeof(MVMExtOpRecord)); target_cu->body.num_extops++; MVM_reentrantmutex_unlock(tc, (MVMReentrantMutex *)target_cu->body.update_mutex); return; } } /* Didn't find it; should be impossible. */ MVM_reentrantmutex_unlock(tc, (MVMReentrantMutex *)target_cu->body.update_mutex); MVM_oops(tc, "Spesh: inline failed to find source CU extop entry"); }
static void instrument_graph(MVMThreadContext *tc, MVMSpeshGraph *g) { MVMSpeshBB *bb = g->entry->linear_next; MVMuint16 array_slot = 0; MVMint32 last_line_number = -2; MVMint32 last_filename = -1; MVMuint16 allocd_slots = g->num_bbs * 2; char *line_report_store = MVM_calloc(allocd_slots, sizeof(char)); /* Since we don't know the right size for the line report store * up front, we will have to realloc it along the way. After that * we havee to fix up the arguments to the coverage log instructions */ MVMuint32 fixup_alloc = g->num_bbs * 2; MVMuint32 fixup_elems = 0; MVMuint32 fixup_idx; /* for iterating over the fixup array */ MVMSpeshIns **to_fixup = MVM_malloc(fixup_alloc * sizeof(MVMSpeshIns*)); while (bb) { MVMSpeshIns *ins = bb->first_ins; MVMSpeshIns *log_ins; MVMBytecodeAnnotation *bbba = MVM_bytecode_resolve_annotation(tc, &g->sf->body, bb->initial_pc); MVMint64 line_number; MVMint64 filename_string_index; if (bbba) { line_number = bbba->line_number; filename_string_index = bbba->filename_string_heap_index; MVM_free(bbba); } else { line_number = -1; bb = bb->linear_next; continue; } /* skip PHI instructions, to make sure PHI only occur uninterrupted after start-of-bb */ while (ins && ins->info->opcode == MVM_SSA_PHI) { ins = ins->next; } if (!ins) ins = bb->last_ins; /* Jumplists require the target BB to start in the goto op. * We must not break this, or we cause the interpreter to derail */ if (bb->last_ins->info->opcode == MVM_OP_jumplist) { MVMint16 to_skip = bb->num_succ; for (; to_skip > 0; to_skip--) { bb = bb->linear_next; } continue; } log_ins = MVM_spesh_alloc(tc, g, sizeof(MVMSpeshIns)); log_ins->info = MVM_op_get_op(MVM_OP_coverage_log); log_ins->operands = MVM_spesh_alloc(tc, g, 4 * sizeof(MVMSpeshOperand)); log_ins->operands[0].lit_str_idx = filename_string_index; log_ins->operands[1].lit_i32 = line_number; if (last_line_number == line_number && last_filename == filename_string_index) { /* Consecutive BBs with the same line number and filename should * share one "already reported" slot. */ log_ins->operands[2].lit_i32 = array_slot; } else { log_ins->operands[2].lit_i32 = array_slot++; last_line_number = line_number; last_filename = filename_string_index; if (array_slot == allocd_slots) { allocd_slots *= 2; line_report_store = MVM_realloc(line_report_store, sizeof(char) * allocd_slots); } } to_fixup[fixup_elems++] = log_ins; if (fixup_elems == fixup_alloc) { fixup_alloc *= 2; to_fixup = MVM_realloc(to_fixup, sizeof(MVMSpeshIns*) * fixup_alloc); } MVM_spesh_manipulate_insert_ins(tc, bb, ins, log_ins); /* Now go through instructions to see if any are annotated with a * precise filename/lineno as well. */ while (ins) { MVMSpeshAnn *ann = ins->annotations; while (ann) { if (ann->type == MVM_SPESH_ANN_LINENO) { /* We are very likely to have one instruction here that has * the same annotation as the bb itself. 
We skip that one.*/ if (ann->data.lineno.line_number == line_number && ann->data.lineno.filename_string_index == filename_string_index) { break; } log_ins = MVM_spesh_alloc(tc, g, sizeof(MVMSpeshIns)); log_ins->info = MVM_op_get_op(MVM_OP_coverage_log); log_ins->operands = MVM_spesh_alloc(tc, g, 4 * sizeof(MVMSpeshOperand)); log_ins->operands[0].lit_str_idx = ann->data.lineno.filename_string_index; log_ins->operands[1].lit_i32 = ann->data.lineno.line_number; log_ins->operands[2].lit_i32 = array_slot++; if (array_slot == allocd_slots) { allocd_slots *= 2; line_report_store = MVM_realloc(line_report_store, sizeof(char) * allocd_slots); } to_fixup[fixup_elems++] = log_ins; if (fixup_elems == fixup_alloc) { fixup_alloc *= 2; to_fixup = MVM_realloc(to_fixup, sizeof(MVMSpeshIns*) * fixup_alloc); } break; } ann = ann->next; } ins = ins->next; } bb = bb->linear_next; } line_report_store = MVM_realloc(line_report_store, sizeof(char) * (array_slot + 1)); for (fixup_idx = 0; fixup_idx < fixup_elems; fixup_idx++) { MVMSpeshIns *ins = to_fixup[fixup_idx]; ins->operands[3].lit_i64 = (uintptr_t)line_report_store; } if (array_slot == 0) { MVM_free(line_report_store); } MVM_free(to_fixup); }
/* Encodes the specified substring to latin-1. Anything outside of latin-1 range * will become a ?. The result string is NULL terminated, but the specified * size is the non-null part. */ char * MVM_string_latin1_encode_substr(MVMThreadContext *tc, MVMString *str, MVMuint64 *output_size, MVMint64 start, MVMint64 length, MVMString *replacement, MVMint32 translate_newlines) { /* Latin-1 is a single byte encoding, but \r\n is a 2-byte grapheme, so we * may have to resize as we go. */ MVMuint32 startu = (MVMuint32)start; MVMStringIndex strgraphs = MVM_string_graphs(tc, str); MVMuint32 lengthu = (MVMuint32)(length == -1 ? strgraphs - startu : length); MVMuint8 *result; size_t result_alloc; MVMuint8 *repl_bytes = NULL; MVMuint64 repl_length; /* must check start first since it's used in the length check */ if (start < 0 || start > strgraphs) MVM_exception_throw_adhoc(tc, "start out of range"); if (length < -1 || start + lengthu > strgraphs) MVM_exception_throw_adhoc(tc, "length out of range"); if (replacement) repl_bytes = (MVMuint8 *) MVM_string_latin1_encode_substr(tc, replacement, &repl_length, 0, -1, NULL, translate_newlines); result_alloc = lengthu; result = MVM_malloc(result_alloc + 1); if (str->body.storage_type == MVM_STRING_GRAPHEME_ASCII) { /* No encoding needed; directly copy. */ memcpy(result, str->body.storage.blob_ascii, lengthu); result[lengthu] = 0; if (output_size) *output_size = lengthu; } else { MVMuint32 i = 0; MVMCodepointIter ci; MVM_string_ci_init(tc, &ci, str, translate_newlines); while (MVM_string_ci_has_more(tc, &ci)) { MVMCodepoint ord = MVM_string_ci_get_codepoint(tc, &ci); if (i == result_alloc) { result_alloc += 8; result = MVM_realloc(result, result_alloc + 1); } if (ord >= 0 && ord <= 255) { result[i] = (MVMuint8)ord; i++; } else if (replacement) { if (repl_length >= result_alloc || i >= result_alloc - repl_length) { result_alloc += repl_length; result = MVM_realloc(result, result_alloc + 1); } memcpy(result + i, repl_bytes, repl_length); i += repl_length; } else { MVM_free(result); MVM_free(repl_bytes); MVM_exception_throw_adhoc(tc, "Error encoding Latin-1 string: could not encode codepoint %d", ord); } } result[i] = 0; if (output_size) *output_size = i; } MVM_free(repl_bytes); return (char *)result; }
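/* A simplified, standalone take on the encoding loop above: grow the output
 * in 8-byte steps for ordinary codepoints and, when a codepoint is out of
 * Latin-1 range, splice in a caller-supplied replacement and grow by its
 * length. It returns NULL instead of throwing when there is no replacement;
 * encode_latin1 and its parameters are illustrative, not the MoarVM API. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Encode `n` codepoints to NUL-terminated Latin-1 bytes; *out_len excludes
 * the terminator. */
static uint8_t *encode_latin1(const int32_t *cps, size_t n,
                              const uint8_t *repl, size_t repl_len,
                              size_t *out_len) {
    size_t   alloc = n, i = 0, j;
    uint8_t *out   = malloc(alloc + 1);
    for (j = 0; j < n; j++) {
        int32_t cp = cps[j];
        if (i == alloc) {
            alloc += 8;
            out = realloc(out, alloc + 1);
        }
        if (cp >= 0 && cp <= 255) {
            out[i++] = (uint8_t)cp;
        }
        else if (repl) {
            if (repl_len >= alloc || i >= alloc - repl_len) {
                alloc += repl_len;
                out = realloc(out, alloc + 1);
            }
            memcpy(out + i, repl, repl_len);
            i += repl_len;
        }
        else {
            free(out);
            return NULL;              /* unencodable and no replacement */
        }
    }
    out[i] = '\0';
    *out_len = i;
    return out;
}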
/* Writes instructions within a basic block boundary. */ void write_instructions(MVMThreadContext *tc, MVMSpeshGraph *g, SpeshWriterState *ws, MVMSpeshBB *bb) { MVMSpeshIns *ins = bb->first_ins; while (ins) { MVMint32 i; /* Process any annotations. */ MVMSpeshAnn *ann = ins->annotations; MVMSpeshAnn *deopt_one_ann = NULL; MVMSpeshAnn *deopt_all_ann = NULL; MVMSpeshAnn *deopt_inline_ann = NULL; while (ann) { switch (ann->type) { case MVM_SPESH_ANN_FH_START: ws->handlers[ann->data.frame_handler_index].start_offset = ws->bytecode_pos; break; case MVM_SPESH_ANN_FH_END: ws->handlers[ann->data.frame_handler_index].end_offset = ws->bytecode_pos; break; case MVM_SPESH_ANN_FH_GOTO: ws->handlers[ann->data.frame_handler_index].goto_offset = ws->bytecode_pos; break; case MVM_SPESH_ANN_DEOPT_ONE_INS: deopt_one_ann = ann; break; case MVM_SPESH_ANN_DEOPT_ALL_INS: deopt_all_ann = ann; break; case MVM_SPESH_ANN_INLINE_START: g->inlines[ann->data.inline_idx].start = ws->bytecode_pos; break; case MVM_SPESH_ANN_INLINE_END: g->inlines[ann->data.inline_idx].end = ws->bytecode_pos; break; case MVM_SPESH_ANN_DEOPT_INLINE: deopt_inline_ann = ann; break; case MVM_SPESH_ANN_DEOPT_OSR: g->deopt_addrs[2 * ann->data.deopt_idx + 1] = ws->bytecode_pos; break; } ann = ann->next; } if (ins->info->opcode != MVM_SSA_PHI) { /* Real instruction, not a phi. Emit opcode. */ if (ins->info->opcode == (MVMuint16)-1) { /* Ext op; resolve. */ MVMExtOpRecord *extops = g->sf->body.cu->body.extops; MVMuint16 num_extops = g->sf->body.cu->body.num_extops; MVMint32 found = 0; for (i = 0; i < num_extops; i++) { if (extops[i].info == ins->info) { write_int16(ws, MVM_OP_EXT_BASE + i); found = 1; break; } } if (!found) MVM_exception_throw_adhoc(tc, "Spesh: failed to resolve extop in code-gen"); } else { /* Core op. */ write_int16(ws, ins->info->opcode); } /* Write out operands. */ for (i = 0; i < ins->info->num_operands; i++) { MVMuint8 flags = ins->info->operands[i]; MVMuint8 rw = flags & MVM_operand_rw_mask; switch (rw) { case MVM_operand_read_reg: case MVM_operand_write_reg: write_int16(ws, ins->operands[i].reg.orig); break; case MVM_operand_read_lex: case MVM_operand_write_lex: write_int16(ws, ins->operands[i].lex.idx); write_int16(ws, ins->operands[i].lex.outers); break; case MVM_operand_literal: { MVMuint8 type = flags & MVM_operand_type_mask; switch (type) { case MVM_operand_int8: write_int8(ws, ins->operands[i].lit_i8); break; case MVM_operand_int16: write_int16(ws, ins->operands[i].lit_i16); break; case MVM_operand_int32: write_int32(ws, ins->operands[i].lit_i32); break; case MVM_operand_int64: write_int64(ws, ins->operands[i].lit_i64); break; case MVM_operand_num32: write_num32(ws, ins->operands[i].lit_n32); break; case MVM_operand_num64: write_num64(ws, ins->operands[i].lit_n64); break; case MVM_operand_callsite: write_int16(ws, ins->operands[i].callsite_idx); break; case MVM_operand_coderef: write_int16(ws, ins->operands[i].coderef_idx); break; case MVM_operand_str: write_int32(ws, ins->operands[i].lit_str_idx); break; case MVM_operand_ins: { MVMint32 offset = ws->bb_offsets[ins->operands[i].ins_bb->idx]; if (offset >= 0) { /* Already know where it is, so just write it. */ write_int32(ws, offset); } else { /* Need to fix it up. 
*/ if (ws->num_fixups == ws->alloc_fixups) { ws->alloc_fixups *= 2; ws->fixup_locations = MVM_realloc(ws->fixup_locations, ws->alloc_fixups * sizeof(MVMint32)); ws->fixup_bbs = MVM_realloc(ws->fixup_bbs, ws->alloc_fixups * sizeof(MVMSpeshBB *)); } ws->fixup_locations[ws->num_fixups] = ws->bytecode_pos; ws->fixup_bbs[ws->num_fixups] = ins->operands[i].ins_bb; write_int32(ws, 0); ws->num_fixups++; } break; } case MVM_operand_spesh_slot: write_int16(ws, ins->operands[i].lit_i16); break; default: MVM_exception_throw_adhoc(tc, "Spesh: unknown operand type %d in codegen (op %s)", (int)type, ins->info->name); } } break; default: MVM_exception_throw_adhoc(tc, "Spesh: unknown operand type in codegen"); } } } /* If there was a deopt point annotation, update table. */ if (deopt_one_ann) g->deopt_addrs[2 * deopt_one_ann->data.deopt_idx + 1] = ws->bytecode_pos; if (deopt_all_ann) g->deopt_addrs[2 * deopt_all_ann->data.deopt_idx + 1] = ws->bytecode_pos; if (deopt_inline_ann) g->deopt_addrs[2 * deopt_inline_ann->data.deopt_idx + 1] = ws->bytecode_pos; ins = ins->next; } }
/* Write functions; all native endian. */ static void ensure_space(SpeshWriterState *ws, int bytes) { if (ws->bytecode_pos + bytes >= ws->bytecode_alloc) { ws->bytecode_alloc *= 2; ws->bytecode = MVM_realloc(ws->bytecode, ws->bytecode_alloc); } }
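/* ensure_space reserves room before each emit so the write_* helpers can
 * memcpy without further checks. The sketch below keeps that split but loops
 * the doubling so arbitrarily large writes also fit; the original doubles
 * once, which suffices there because each spesh write is at most a few bytes
 * and the initial allocation is larger. Writer/write_bytes are hypothetical
 * names. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint8_t *bytes;
    size_t   pos;
    size_t   alloc;
} Writer;

/* Make sure `n` more bytes fit, doubling (from 64) as often as needed. */
static void writer_ensure_space(Writer *w, size_t n) {
    while (w->pos + n >= w->alloc) {
        w->alloc = w->alloc ? w->alloc * 2 : 64;
        w->bytes = realloc(w->bytes, w->alloc);
    }
}

/* Reserve space, then copy the bytes in. */
static void write_bytes(Writer *w, const void *src, size_t n) {
    writer_ensure_space(w, n);
    memcpy(w->bytes + w->pos, src, n);
    w->pos += n;
}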
/* Merges the inlinee's spesh graph into the inliner. */ static void merge_graph(MVMThreadContext *tc, MVMSpeshGraph *inliner, MVMSpeshGraph *inlinee, MVMCode *inlinee_code, MVMSpeshIns *invoke_ins) { MVMSpeshFacts **merged_facts; MVMuint16 *merged_fact_counts; MVMint32 i, total_inlines, orig_deopt_addrs; MVMSpeshBB *inlinee_first_bb = NULL, *inlinee_last_bb = NULL; MVMint32 active_handlers_at_invoke = 0; /* If the inliner and inlinee are from different compilation units, we * potentially have to fix up extra things. */ MVMint32 same_comp_unit = inliner->sf->body.cu == inlinee->sf->body.cu; /* Renumber the locals, lexicals, and basic blocks of the inlinee; also * re-write any indexes in annotations that need it. */ MVMSpeshBB *bb = inlinee->entry; while (bb) { MVMSpeshIns *ins = bb->first_ins; while (ins) { MVMuint16 opcode = ins->info->opcode; MVMSpeshAnn *ann = ins->annotations; while (ann) { switch (ann->type) { case MVM_SPESH_ANN_FH_START: case MVM_SPESH_ANN_FH_END: case MVM_SPESH_ANN_FH_GOTO: ann->data.frame_handler_index += inliner->num_handlers; break; case MVM_SPESH_ANN_DEOPT_INLINE: ann->data.deopt_idx += inliner->num_deopt_addrs; break; case MVM_SPESH_ANN_INLINE_START: case MVM_SPESH_ANN_INLINE_END: ann->data.inline_idx += inliner->num_inlines; break; } ann = ann->next; } if (opcode == MVM_SSA_PHI) { for (i = 0; i < ins->info->num_operands; i++) ins->operands[i].reg.orig += inliner->num_locals; } else { for (i = 0; i < ins->info->num_operands; i++) { MVMuint8 flags = ins->info->operands[i]; switch (flags & MVM_operand_rw_mask) { case MVM_operand_read_reg: case MVM_operand_write_reg: ins->operands[i].reg.orig += inliner->num_locals; break; case MVM_operand_read_lex: case MVM_operand_write_lex: ins->operands[i].lex.idx += inliner->num_lexicals; break; default: { MVMuint32 type = flags & MVM_operand_type_mask; if (type == MVM_operand_spesh_slot) { ins->operands[i].lit_i16 += inliner->num_spesh_slots; } else if (type == MVM_operand_callsite) { if (!same_comp_unit) fix_callsite(tc, inliner, inlinee, &(ins->operands[i])); } else if (type == MVM_operand_coderef) { if (!same_comp_unit) fix_coderef(tc, inliner, inlinee, &(ins->operands[i])); } else if (type == MVM_operand_str) { if (!same_comp_unit) fix_str(tc, inliner, inlinee, &(ins->operands[i])); } break; } } } } ins = ins->next; } bb->idx += inliner->num_bbs - 1; /* -1 as we won't include entry */ bb->inlined = 1; if (!bb->linear_next) inlinee_last_bb = bb; bb = bb->linear_next; } /* Incorporate the basic blocks by concatening them onto the end of the * linear_next chain of the inliner; skip the inlinee's fake entry BB. */ bb = inliner->entry; while (bb) { if (!bb->linear_next) { /* Found the end; insert and we're done. */ bb->linear_next = inlinee_first_bb = inlinee->entry->linear_next; bb = NULL; } else { bb = bb->linear_next; } } /* Merge facts. 
*/ merged_facts = MVM_spesh_alloc(tc, inliner, (inliner->num_locals + inlinee->num_locals) * sizeof(MVMSpeshFacts *)); memcpy(merged_facts, inliner->facts, inliner->num_locals * sizeof(MVMSpeshFacts *)); memcpy(merged_facts + inliner->num_locals, inlinee->facts, inlinee->num_locals * sizeof(MVMSpeshFacts *)); inliner->facts = merged_facts; merged_fact_counts = MVM_spesh_alloc(tc, inliner, (inliner->num_locals + inlinee->num_locals) * sizeof(MVMuint16)); memcpy(merged_fact_counts, inliner->fact_counts, inliner->num_locals * sizeof(MVMuint16)); memcpy(merged_fact_counts + inliner->num_locals, inlinee->fact_counts, inlinee->num_locals * sizeof(MVMuint16)); inliner->fact_counts = merged_fact_counts; /* Copy over spesh slots. */ for (i = 0; i < inlinee->num_spesh_slots; i++) MVM_spesh_add_spesh_slot(tc, inliner, inlinee->spesh_slots[i]); /* If they are from separate compilation units, make another pass through * to fix up on wvals. Note we can't do this in the first pass as we must * not modify the spesh slots once we've got started with the rewrites. * Now we've resolved all that, we're good to map wvals elsewhere into * some extra spesh slots. */ if (!same_comp_unit) { bb = inlinee->entry; while (bb) { MVMSpeshIns *ins = bb->first_ins; while (ins) { MVMuint16 opcode = ins->info->opcode; if (opcode == MVM_OP_wval || opcode == MVM_OP_wval_wide) fix_wval(tc, inliner, inlinee, ins); ins = ins->next; } bb = bb->linear_next; } } /* Merge de-opt tables, if needed. */ orig_deopt_addrs = inliner->num_deopt_addrs; if (inlinee->num_deopt_addrs) { assert(inlinee->deopt_addrs != inliner->deopt_addrs); inliner->alloc_deopt_addrs += inlinee->alloc_deopt_addrs; if (inliner->deopt_addrs) inliner->deopt_addrs = MVM_realloc(inliner->deopt_addrs, inliner->alloc_deopt_addrs * sizeof(MVMint32) * 2); else inliner->deopt_addrs = MVM_malloc(inliner->alloc_deopt_addrs * sizeof(MVMint32) * 2); memcpy(inliner->deopt_addrs + inliner->num_deopt_addrs * 2, inlinee->deopt_addrs, inlinee->alloc_deopt_addrs * sizeof(MVMint32) * 2); inliner->num_deopt_addrs += inlinee->num_deopt_addrs; } /* Merge inlines table, and add us an entry too. */ total_inlines = inliner->num_inlines + inlinee->num_inlines + 1; inliner->inlines = inliner->num_inlines ? 
MVM_realloc(inliner->inlines, total_inlines * sizeof(MVMSpeshInline)) : MVM_malloc(total_inlines * sizeof(MVMSpeshInline)); memcpy(inliner->inlines + inliner->num_inlines, inlinee->inlines, inlinee->num_inlines * sizeof(MVMSpeshInline)); for (i = inliner->num_inlines; i < total_inlines - 1; i++) { inliner->inlines[i].locals_start += inliner->num_locals; inliner->inlines[i].lexicals_start += inliner->num_lexicals; inliner->inlines[i].return_deopt_idx += orig_deopt_addrs; } inliner->inlines[total_inlines - 1].code = inlinee_code; inliner->inlines[total_inlines - 1].g = inlinee; inliner->inlines[total_inlines - 1].locals_start = inliner->num_locals; inliner->inlines[total_inlines - 1].lexicals_start = inliner->num_lexicals; switch (invoke_ins->info->opcode) { case MVM_OP_invoke_v: inliner->inlines[total_inlines - 1].res_type = MVM_RETURN_VOID; break; case MVM_OP_invoke_o: inliner->inlines[total_inlines - 1].res_reg = invoke_ins->operands[0].reg.orig; inliner->inlines[total_inlines - 1].res_type = MVM_RETURN_OBJ; break; case MVM_OP_invoke_i: inliner->inlines[total_inlines - 1].res_reg = invoke_ins->operands[0].reg.orig; inliner->inlines[total_inlines - 1].res_type = MVM_RETURN_INT; break; case MVM_OP_invoke_n: inliner->inlines[total_inlines - 1].res_reg = invoke_ins->operands[0].reg.orig; inliner->inlines[total_inlines - 1].res_type = MVM_RETURN_NUM; break; case MVM_OP_invoke_s: inliner->inlines[total_inlines - 1].res_reg = invoke_ins->operands[0].reg.orig; inliner->inlines[total_inlines - 1].res_type = MVM_RETURN_STR; break; default: MVM_oops(tc, "Spesh inline: unknown invoke instruction"); } inliner->inlines[total_inlines - 1].return_deopt_idx = return_deopt_idx(tc, invoke_ins); inliner->num_inlines = total_inlines; /* Create/update per-specialization local and lexical type maps. */ if (!inliner->local_types) { MVMint32 local_types_size = inliner->num_locals * sizeof(MVMuint16); inliner->local_types = MVM_malloc(local_types_size); memcpy(inliner->local_types, inliner->sf->body.local_types, local_types_size); } inliner->local_types = MVM_realloc(inliner->local_types, (inliner->num_locals + inlinee->num_locals) * sizeof(MVMuint16)); memcpy(inliner->local_types + inliner->num_locals, inlinee->local_types ? inlinee->local_types : inlinee->sf->body.local_types, inlinee->num_locals * sizeof(MVMuint16)); if (!inliner->lexical_types) { MVMint32 lexical_types_size = inliner->num_lexicals * sizeof(MVMuint16); inliner->lexical_types = MVM_malloc(lexical_types_size); memcpy(inliner->lexical_types, inliner->sf->body.lexical_types, lexical_types_size); } inliner->lexical_types = MVM_realloc(inliner->lexical_types, (inliner->num_lexicals + inlinee->num_lexicals) * sizeof(MVMuint16)); memcpy(inliner->lexical_types + inliner->num_lexicals, inlinee->lexical_types ? inlinee->lexical_types : inlinee->sf->body.lexical_types, inlinee->num_lexicals * sizeof(MVMuint16)); /* Merge handlers from inlinee. 
*/ if (inlinee->num_handlers) { MVMuint32 total_handlers = inliner->num_handlers + inlinee->num_handlers; resize_handlers_table(tc, inliner, total_handlers); memcpy(inliner->handlers + inliner->num_handlers, inlinee->handlers, inlinee->num_handlers * sizeof(MVMFrameHandler)); for (i = inliner->num_handlers; i < total_handlers; i++) { inliner->handlers[i].block_reg += inliner->num_locals; inliner->handlers[i].label_reg += inliner->num_locals; } } /* If the inliner has handlers in effect at the point of the call that we * are inlining, then we duplicate those and place them surrounding the * inlinee, but with the goto still pointing to the original location. * This means that we can still do a linear scan when searching for an * exception handler, and don't have to try the (costly and fiddly) matter * of trying to traverse the post-inlined call chain. */ if (inliner->sf->body.num_handlers) { /* Walk inliner looking for handlers in effect at the point we hit the * invoke instruction we're currently inlining; also record all of the * instructions where the handler "goto" annotation lives. */ MVMuint32 orig_handlers = inliner->sf->body.num_handlers; MVMuint8 *active = MVM_spesh_alloc(tc, inliner, orig_handlers); MVMSpeshIns **handler_goto_ins = MVM_spesh_alloc(tc, inliner, orig_handlers * sizeof(MVMSpeshIns *)); MVMint32 found_invoke = 0; bb = inliner->entry; while (bb && !bb->inlined) { MVMSpeshIns *ins = bb->first_ins; while (ins) { MVMSpeshAnn *ann = ins->annotations; while (ann) { if (ann->type == MVM_SPESH_ANN_FH_GOTO) { if (ann->data.frame_handler_index < orig_handlers) handler_goto_ins[ann->data.frame_handler_index] = ins; } else if (!found_invoke) { /* Only update these to the point we found the invoke * being inlined, so it serves as a snapshot of what * is active. */ if (ann->type == MVM_SPESH_ANN_FH_START) active[ann->data.frame_handler_index] = 1; else if (ann->type == MVM_SPESH_ANN_FH_END) active[ann->data.frame_handler_index] = 0; } ann = ann->next; } if (ins == invoke_ins) { /* Found it; see if we have any handlers active. If so, we * will continue walking to collect goto annotations. */ found_invoke = 1; for (i = 0; i < orig_handlers; i++) active_handlers_at_invoke += active[i]; if (!active_handlers_at_invoke) break; } ins = ins->next; } if (found_invoke && !active_handlers_at_invoke) break; bb = bb->linear_next; } /* If we found handlers active at the point of invoke, duplicate them * in the handlers table and add annotations. */ if (active_handlers_at_invoke) { MVMuint32 insert_pos = inliner->num_handlers + inlinee->num_handlers; resize_handlers_table(tc, inliner, insert_pos + active_handlers_at_invoke); for (i = orig_handlers - 1; i >= 0; i--) { if (active[i]) { /* Add handler start annotation to first inlinee instruction. */ MVMSpeshAnn *new_ann = MVM_spesh_alloc(tc, inliner, sizeof(MVMSpeshAnn)); new_ann->type = MVM_SPESH_ANN_FH_START; new_ann->data.frame_handler_index = insert_pos; new_ann->next = inlinee_first_bb->first_ins->annotations; inlinee_first_bb->first_ins->annotations = new_ann; /* Add handler end annotation to last inlinee instruction. */ new_ann = MVM_spesh_alloc(tc, inliner, sizeof(MVMSpeshAnn)); new_ann->type = MVM_SPESH_ANN_FH_END; new_ann->data.frame_handler_index = insert_pos; new_ann->next = inlinee_last_bb->last_ins->annotations; inlinee_last_bb->last_ins->annotations = new_ann; /* Add handler goto annotation to original target in inliner. 
*/ new_ann = MVM_spesh_alloc(tc, inliner, sizeof(MVMSpeshAnn)); new_ann->type = MVM_SPESH_ANN_FH_GOTO; new_ann->data.frame_handler_index = insert_pos; new_ann->next = handler_goto_ins[i]->annotations; handler_goto_ins[i]->annotations = new_ann; /* Copy handler entry to new slot. */ memcpy(inliner->handlers + insert_pos, inliner->handlers + i, sizeof(MVMFrameHandler)); insert_pos++; } } } } /* Update total locals, lexicals, basic blocks, and handlers of the * inliner. */ inliner->num_bbs += inlinee->num_bbs - 1; inliner->num_locals += inlinee->num_locals; inliner->num_lexicals += inlinee->num_lexicals; inliner->num_handlers += inlinee->num_handlers + active_handlers_at_invoke; }