void caml_stash_backtrace(value exn, uintnat pc, char * sp, char * trapsp)
{
  frame_descr * d;
  uintnat h;

  if (exn != caml_backtrace_last_exn) {
    caml_backtrace_pos = 0;
    caml_backtrace_last_exn = exn;
  }
  if (caml_backtrace_buffer == NULL) {
    caml_backtrace_buffer = malloc(BACKTRACE_BUFFER_SIZE * sizeof(code_t));
    if (caml_backtrace_buffer == NULL) return;
  }
  if (caml_frame_descriptors == NULL) caml_init_frame_descriptors();

  while (1) {
    /* Find the descriptor corresponding to the return address */
    h = Hash_retaddr(pc);
    while (1) {
      d = caml_frame_descriptors[h];
      if (d == 0) return; /* can happen if some code not compiled with -g */
      if (d->retaddr == pc) break;
      h = (h+1) & caml_frame_descriptors_mask;
    }
    /* Skip to next frame */
    if (d->frame_size != 0xFFFF) {
      /* Regular frame, store its descriptor in the backtrace buffer */
      if (caml_backtrace_pos >= BACKTRACE_BUFFER_SIZE) return;
      caml_backtrace_buffer[caml_backtrace_pos++] = (code_t) d;
#ifndef Stack_grows_upwards
      sp += (d->frame_size & 0xFFFC);
#else
      sp -= (d->frame_size & 0xFFFC);
#endif
      pc = Saved_return_address(sp);
#ifdef Mask_already_scanned
      pc = Mask_already_scanned(pc);
#endif
    } else {
      /* Special frame marking the top of a stack chunk for an ML callback.
         Skip the C portion of the stack and continue with the next ML stack
         chunk. */
      struct caml_context * next_context = Callback_link(sp);
      sp = next_context->bottom_of_stack;
      pc = next_context->last_retaddr;
      /* A null sp means no more ML stack chunks; stop here. */
      if (sp == NULL) return;
    }
    /* Stop when we reach the current exception handler */
#ifndef Stack_grows_upwards
    if (sp > trapsp) return;
#else
    if (sp < trapsp) return;
#endif
  }
}
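/* Illustrative sketch, not part of the runtime: the loop above stores
   frame descriptors (cast to code_t) in caml_backtrace_buffer, with
   caml_backtrace_pos counting the valid entries. A hypothetical debug
   dump could recover each stashed frame's return address as follows;
   assumes <stdio.h> is available. */
static void dump_stashed_backtrace(void)
{
  int i;
  for (i = 0; i < caml_backtrace_pos; i++) {
    frame_descr * d = (frame_descr *) caml_backtrace_buffer[i];
    printf("frame %d: retaddr = %p, size = %u\n",
           i, (void *) d->retaddr, (unsigned) d->frame_size);
  }
}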
void caml_init_gc ()
{
/*
  uintnat major_heap_size =
    Bsize_wsize (caml_normalize_heap_increment (caml_params->heap_size_init));
*/

  caml_max_stack_size = caml_params->init_max_stack_wsz;
  caml_fiber_wsz = caml_params->init_fiber_wsz;
  caml_percent_free = norm_pfree (caml_params->init_percent_free);
  caml_gc_log ("Initial stack limit: %luk bytes",
               caml_max_stack_size / 1024 * sizeof (value));

  caml_setup_eventlog();
  caml_gc_phase = Phase_sweep_and_mark_main;
#ifdef NATIVE_CODE
  caml_init_frame_descriptors();
#endif
  caml_init_domains(caml_params->init_minor_heap_wsz);
/*
  caml_major_heap_increment = major_incr;
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   Caml_state->minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n", caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n", caml_percent_max);
  if (caml_major_heap_increment > 1000){
    caml_gc_message (0x20, "Initial heap increment: %"
                     ARCH_INTNAT_PRINTF_FORMAT "uk words\n",
                     caml_major_heap_increment / 1024);
  }else{
    caml_gc_message (0x20, "Initial heap increment: %"
                     ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                     caml_major_heap_increment);
  }
  caml_gc_message (0x20, "Initial allocation policy: %d\n",
                   caml_allocation_policy);
*/
}
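/* norm_pfree is called above but its definition is not in this excerpt.
   A minimal sketch of what such a normalizer typically does (clamp the
   user-supplied space-overhead percentage to a sane lower bound); the
   actual definition may differ. */
static uintnat norm_pfree (uintnat p)
{
  return p < 1 ? 1 : p;  /* never allow 0% free space */
}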
frame_descr * caml_next_frame_descriptor(uintnat * pc, char ** sp)
{
  frame_descr * d;
  uintnat h;

  if (caml_frame_descriptors == NULL) caml_init_frame_descriptors();

  while (1) {
    h = Hash_retaddr(*pc);
    while (1) {
      d = caml_frame_descriptors[h];
      if (d == 0) return NULL; /* can happen if some code compiled without -g */
      if (d->retaddr == *pc) break;
      h = (h+1) & caml_frame_descriptors_mask;
    }
    /* Skip to next frame */
    if (d->frame_size != 0xFFFF) {
      /* Regular frame, update sp/pc and return the frame descriptor */
#ifndef Stack_grows_upwards
      *sp += (d->frame_size & 0xFFFC);
#else
      *sp -= (d->frame_size & 0xFFFC);
#endif
      *pc = Saved_return_address(*sp);
#ifdef Mask_already_scanned
      *pc = Mask_already_scanned(*pc);
#endif
      return d;
    } else {
      /* Special frame marking the top of a stack chunk for an ML callback.
         Skip the C portion of the stack and continue with the next ML stack
         chunk. */
      struct caml_context * next_context = Callback_link(*sp);
      *sp = next_context->bottom_of_stack;
      *pc = next_context->last_retaddr;
      /* A null sp means no more ML stack chunks; stop here. */
      if (*sp == NULL) return NULL;
    }
  }
}
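/* Hypothetical usage sketch: caml_next_frame_descriptor acts as an
   iterator over ML stack frames. Given a starting pc/sp pair it
   advances both through regular frames and across callback boundaries,
   returning NULL when no descriptor is found or no ML chunks remain.
   A caller counting the frames of a stack might look like this
   (count_ml_frames itself is illustrative, not a runtime function). */
static intnat count_ml_frames(uintnat pc, char * sp)
{
  intnat n = 0;
  while (caml_next_frame_descriptor(&pc, &sp) != NULL)
    n++;  /* one descriptor per ML frame walked */
  return n;
}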
void caml_scan_stack_high (scanning_action f, value stack, value* stack_high)
{
  char * sp;
  uintnat retaddr;
  value * regs;
  frame_descr * d;
  uintnat h;
  int n, ofs;
#ifdef Stack_grows_upwards
  short * p;  /* PR#4339: stack offsets are negative in this case */
#else
  unsigned short * p;
#endif
  value *root;
  struct caml_context* context;

  if (caml_frame_descriptors == NULL) caml_init_frame_descriptors();

  f(Stack_handle_value(stack), &Stack_handle_value(stack));
  f(Stack_handle_exception(stack), &Stack_handle_exception(stack));
  f(Stack_handle_effect(stack), &Stack_handle_effect(stack));
  f(Stack_parent(stack), &Stack_parent(stack));

  if (Stack_sp(stack) == 0) return;

  sp = ((char*)stack_high) - Stack_sp(stack);

next_chunk:
  if (sp == (char*)stack_high) return;
  context = (struct caml_context*)sp;
  regs = context->gc_regs;
  sp += sizeof(struct caml_context);

  if (sp == (char*)stack_high) return;
  retaddr = *(uintnat*)sp;
  sp += sizeof(value);

  while(1) {
    /* Find the descriptor corresponding to the return address */
    h = Hash_retaddr(retaddr);
    while(1) {
      d = caml_frame_descriptors[h];
      if (d->retaddr == retaddr) break;
      h = (h+1) & caml_frame_descriptors_mask;
    }
    if (d->frame_size != 0xFFFF) {
      /* Scan the roots in this frame */
      for (p = d->live_ofs, n = d->num_live; n > 0; n--, p++) {
        ofs = *p;
        if (ofs & 1) {
          root = regs + (ofs >> 1);
        } else {
          root = (value *)(sp + ofs);
        }
        f (*root, root);
      }
      /* Move to next frame */
#ifndef Stack_grows_upwards
      sp += (d->frame_size & 0xFFFC);
#else
      sp -= (d->frame_size & 0xFFFC);
#endif
      retaddr = Saved_return_address(sp);
      /* XXX KC: disabled already scanned optimization. */
    } else {