/* Primitive behind [Gc.set]: update the GC parameters this runtime
   honours from the fields of the [Gc.control] record [v].
   Fields read here: 5 = stack limit (bytecode only), 2 = space
   overhead, 0 = minor heap size in words -- mapping inferred from the
   accessors below; confirm against the [Gc.control] declaration. */
CAMLprim value caml_gc_set(value v)
{
  uintnat newpf;
  uintnat newminwsz;

#ifndef NATIVE_CODE
  /* The interpreter stack limit only exists in the bytecode runtime. */
  caml_change_max_stack_size (Long_field (v, 5));
#endif

  newpf = norm_pfree (Long_field (v, 2));
  if (newpf != caml_percent_free){
    caml_percent_free = newpf;
    caml_gc_message (0x20, "New space overhead: %"
                     ARCH_INTNAT_PRINTF_FORMAT "u%%\n", caml_percent_free);
  }

  /* Minor heap size comes last because it will trigger a minor collection
     (thus invalidating [v]) and it can raise [Out_of_memory]. */
  newminwsz = caml_norm_minor_heap_size (Long_field (v, 0));
  if (newminwsz != Caml_state->minor_heap_wsz){
    caml_gc_message (0x20, "New minor heap size: %"
                     ARCH_SIZET_PRINTF_FORMAT "uk words\n", newminwsz / 1024);
    caml_set_minor_heap_size (newminwsz);
  }
  return Val_unit;
}
/* Make room in [tbl] after its write cursor has caught up with its
   current limit.  Three cases:
   - never allocated: allocate it, sized from the minor heap;
   - [limit] == [threshold]: extend [limit] to [end] and request a major
     slice (caml_urge_major_slice) rather than growing immediately;
   - otherwise ([limit] already at [end]): double the table in place.
   The [msg_*] parameters supply the instrumentation key and the log
   strings, so several tables can share this one routine. */
static void realloc_generic_table
  (struct generic_table *tbl, asize_t element_size,
   char * msg_intr_int, char *msg_threshold, char *msg_growing,
   char *msg_error)
{
  CAMLassert (tbl->ptr == tbl->limit);
  CAMLassert (tbl->limit <= tbl->end);
  CAMLassert (tbl->limit >= tbl->threshold);

  if (tbl->base == NULL){
    alloc_generic_table (tbl, Caml_state->minor_heap_wsz / 8, 256,
                         element_size);
  }else if (tbl->limit == tbl->threshold){
    CAML_INSTR_INT (msg_intr_int, 1);
    caml_gc_message (0x08, msg_threshold, 0);
    tbl->limit = tbl->end;
    caml_urge_major_slice ();
  }else{
    asize_t sz;
    asize_t cur_ptr = tbl->ptr - tbl->base;  /* save cursor offset: the
                                                base may move below */

    tbl->size *= 2;
    sz = (tbl->size + tbl->reserve) * element_size;
    caml_gc_message (0x08, msg_growing, (intnat) sz/1024);
    tbl->base = caml_stat_resize_noexc (tbl->base, sz);
    if (tbl->base == NULL){
      caml_fatal_error (msg_error);
    }
    /* Recompute every derived pointer against the (possibly moved)
       base. */
    tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
    tbl->threshold = tbl->base + tbl->size * element_size;
    tbl->ptr = tbl->base + cur_ptr;
    tbl->limit = tbl->end;
  }
}
static int caml_page_table_resize(void) { struct page_table old = caml_page_table; uintnat * new_entries; uintnat i, h; caml_gc_message (0x08, "Growing page table to %lu entries\n", caml_page_table.size); new_entries = (uintnat *) calloc(2 * old.size, sizeof(uintnat)); if (new_entries == NULL) { caml_gc_message (0x08, "No room for growing page table\n", 0); return -1; } caml_page_table.size = 2 * old.size; caml_page_table.shift = old.shift - 1; caml_page_table.mask = caml_page_table.size - 1; caml_page_table.occupancy = old.occupancy; caml_page_table.entries = new_entries; for (i = 0; i < old.size; i++) { uintnat e = old.entries[i]; if (e == 0) continue; h = Hash(Page(e)); while (caml_page_table.entries[h] != 0) h = (h + 1) & caml_page_table.mask; caml_page_table.entries[h] = e; } free(old.entries); return 0; }
/* Make room in the minor-GC ref table after [ptr] has reached [limit].
   First use allocates the table; hitting the threshold extends the
   usable area and requests a major slice; a truly full table is
   doubled in place (aborting via caml_fatal_error on OOM). */
void caml_realloc_ref_table (struct caml_ref_table *tbl)
{
  Assert (tbl->ptr == tbl->limit);
  Assert (tbl->limit <= tbl->end);
  Assert (tbl->limit >= tbl->threshold);

  if (tbl->base == NULL){
    /* First use: size scaled to the minor heap. */
    caml_alloc_table (tbl, caml_minor_heap_size / sizeof (value) / 8, 256);
  }else if (tbl->limit == tbl->threshold){
    caml_gc_message (0x08, "ref_table threshold crossed\n", 0);
    tbl->limit = tbl->end;
    caml_urge_major_slice ();
  }else{
    /* This will almost never happen with the bytecode interpreter. */
    asize_t sz;
    asize_t cur_ptr = tbl->ptr - tbl->base;  /* cursor offset survives
                                                a moving realloc */
    Assert (caml_force_major_slice);

    tbl->size *= 2;
    sz = (tbl->size + tbl->reserve) * sizeof (value *);
    caml_gc_message (0x08, "Growing ref_table to %"
                     ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
                     (intnat) sz/1024);
    tbl->base = (value **) realloc ((char *) tbl->base, sz);
    if (tbl->base == NULL){
      caml_fatal_error ("Fatal error: ref_table overflow\n");
    }
    /* Rebuild all derived pointers against the new base. */
    tbl->end = tbl->base + tbl->size + tbl->reserve;
    tbl->threshold = tbl->base + tbl->size;
    tbl->ptr = tbl->base + cur_ptr;
    tbl->limit = tbl->end;
  }
}
char * caml_search_in_path(struct ext_table * path, char * name) { char * p, * fullname; int i; struct stat st; for (p = name; *p != 0; p++) { if (*p == '/' || *p == '\\') goto not_found; } for (i = 0; i < path->size; i++) { fullname = caml_stat_alloc(strlen((char *)(path->contents[i])) + strlen(name) + 2); strcpy(fullname, (char *)(path->contents[i])); strcat(fullname, "\\"); strcat(fullname, name); caml_gc_message(0x100, "Searching %s\n", (uintnat) fullname); if (stat(fullname, &st) == 0 && S_ISREG(st.st_mode)) return fullname; caml_stat_free(fullname); } not_found: caml_gc_message(0x100, "%s not found in search path\n", (uintnat) name); fullname = caml_stat_alloc(strlen(name) + 1); strcpy(fullname, name); return fullname; }
/* Try to open [*name] as a bytecode executable.
   On success returns an open read-only file descriptor and stores the
   trailer in [*trail]; [*name] is replaced by the full path found on
   the executable search path (caller owns the new string).
   Errors: FILE_NOT_FOUND if the file cannot be opened; BAD_BYTECODE if
   it is too short or starts with "#!" (unless [do_open_script]);
   otherwise the nonzero result of read_trailer. */
int caml_attempt_open(char **name, struct exec_trailer *trail,
                      int do_open_script)
{
  char * truename;
  int fd;
  int err;
  char buf [2];

  truename = caml_search_exe_in_path(*name);
  *name = truename;
  caml_gc_message(0x100, "Opening bytecode executable %s\n",
                  (uintnat) truename);
  fd = open(truename, O_RDONLY | O_BINARY);
  if (fd == -1) {
    caml_gc_message(0x100, "Cannot open file\n", 0);
    return FILE_NOT_FOUND;
  }
  if (!do_open_script) {
    /* Peek at the first two bytes to reject "#!" interpreter scripts. */
    err = read (fd, buf, 2);
    if (err < 2 || (buf [0] == '#' && buf [1] == '!')) {
      close(fd);
      caml_gc_message(0x100, "Rejected #! script\n", 0);
      return BAD_BYTECODE;
    }
  }
  err = read_trailer(fd, trail);
  if (err != 0) {
    close(fd);
    caml_gc_message(0x100, "Not a bytecode executable\n", 0);
    return err;
  }
  return fd;
}
/* Trigger an automatic heap compaction when the estimated free-space
   overhead (free words as a percentage of non-free words) reaches
   caml_percent_max and the heap has more than one chunk. */
static void test_and_compact (void)
{
  float fp;

  /* Computed in float; the clamp below also absorbs the infinity
     produced when the heap is entirely free. */
  fp = 100.0 * caml_fl_cur_wsz / (caml_stat_heap_wsz - caml_fl_cur_wsz);
  if (fp > 999999.0) fp = 999999.0;
  caml_gc_message (0x200, "Estimated overhead (lower bound) = %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                   (uintnat) fp);
  if (fp >= caml_percent_max && caml_stat_heap_chunks > 1){
    caml_gc_message (0x200, "Automatic compaction triggered.\n", 0);
    caml_compact_heap ();
  }
}
/* Make sure the minor heap is empty by performing a minor collection
   if needed: promote everything reachable from the roots and the ref
   table, fix up weak pointers, then reset the minor heap to empty.
   Young-dead finalisers are handled by caml_final_empty_young in
   either case. */
void caml_empty_minor_heap (void)
{
  value **r;
  uintnat prev_alloc_words;

  if (caml_young_ptr != caml_young_end){  /* anything allocated since
                                             the last collection? */
    if (caml_minor_gc_begin_hook != NULL) (*caml_minor_gc_begin_hook) ();
    prev_alloc_words = caml_allocated_words;
    caml_in_minor_collection = 1;
    caml_gc_message (0x02, "<", 0);
    /* Promote roots, then everything the ref table points at. */
    caml_oldify_local_roots();
    for (r = caml_ref_table.base; r < caml_ref_table.ptr; r++){
      caml_oldify_one (**r, *r);
    }
    caml_oldify_mopup ();
    /* Weak pointers still aimed at the minor heap: a zero header
       appears to mark a block already moved by oldify, with field 0
       holding its new location (forwarding convention -- confirm
       against the oldify code); anything else was not promoted and the
       weak pointer is cleared to caml_weak_none. */
    for (r = caml_weak_ref_table.base; r < caml_weak_ref_table.ptr; r++){
      if (Is_block (**r) && Is_young (**r)){
        if (Hd_val (**r) == 0){
          **r = Field (**r, 0);
        }else{
          **r = caml_weak_none;
        }
      }
    }
    if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start;
    caml_stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr);
    /* Reset the minor heap to empty and re-arm the allocation limit. */
    caml_young_ptr = caml_young_end;
    caml_young_limit = caml_young_start;
    clear_table (&caml_ref_table);
    clear_table (&caml_weak_ref_table);
    caml_gc_message (0x02, ">", 0);
    caml_in_minor_collection = 0;
    caml_stat_promoted_words += caml_allocated_words - prev_alloc_words;
    ++ caml_stat_minor_collections;
    caml_final_empty_young ();
    if (caml_minor_gc_end_hook != NULL) (*caml_minor_gc_end_hook) ();
  }else{
    caml_final_empty_young ();
  }
#ifdef DEBUG
  {
    value *p;
    /* Poison the now-empty minor heap to catch dangling reads. */
    for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
/* Integer variant of the automatic-compaction test: compact when the
   estimated free-space overhead reaches caml_percent_max and the heap
   has more than one chunk. */
static void test_and_compact (void)
{
  uintnat fp;

  /* NOTE(review): 100 * caml_fl_cur_size can wrap for a very large
     free list, and the divisor is zero when the heap is entirely free;
     the float variant of this function avoids both -- confirm whether
     those cases can occur here. */
  fp = (100 * caml_fl_cur_size)
       / (Wsize_bsize (caml_stat_heap_size) - caml_fl_cur_size);
  if (fp > 999999) fp = 999999;
  caml_gc_message (0x200, "Estimated overhead (lower bound) = %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                   (uintnat) fp);
  if (fp >= caml_percent_max && caml_stat_heap_chunks > 1){
    caml_gc_message (0x200, "Automatic compaction triggered.\n", 0);
    caml_compact_heap ();
  }
}
/* Primitive behind [Gc.set]: update the GC parameters from the fields
   of the [Gc.control] record [v].  Fields read: 0 minor heap size,
   1 major heap increment, 2 space overhead, 3 verbosity, 4 max
   overhead, 5 stack limit (bytecode only), 6 allocation policy.
   Fix: the "space overhead", "max overhead" and "allocation policy"
   messages used %d for uintnat arguments -- a format/argument type
   mismatch; use %lu as the other uintnat messages in this file do. */
CAMLprim value caml_gc_set(value v)
{
  uintnat newpf, newpm;
  asize_t newheapincr;
  asize_t newminsize;
  uintnat oldpolicy;

  caml_verb_gc = Long_val (Field (v, 3));

#ifndef NATIVE_CODE
  /* The interpreter stack limit only exists in the bytecode runtime. */
  caml_change_max_stack_size (Long_val (Field (v, 5)));
#endif

  newpf = norm_pfree (Long_val (Field (v, 2)));
  if (newpf != caml_percent_free){
    caml_percent_free = newpf;
    caml_gc_message (0x20, "New space overhead: %lu%%\n",
                     caml_percent_free);
  }

  newpm = norm_pmax (Long_val (Field (v, 4)));
  if (newpm != caml_percent_max){
    caml_percent_max = newpm;
    caml_gc_message (0x20, "New max overhead: %lu%%\n", caml_percent_max);
  }

  newheapincr = Bsize_wsize (norm_heapincr (Long_val (Field (v, 1))));
  if (newheapincr != caml_major_heap_increment){
    caml_major_heap_increment = newheapincr;
    caml_gc_message (0x20, "New heap increment size: %luk bytes\n",
                     caml_major_heap_increment/1024);
  }

  oldpolicy = caml_allocation_policy;
  caml_set_allocation_policy (Long_val (Field (v, 6)));
  if (oldpolicy != caml_allocation_policy){
    caml_gc_message (0x20, "New allocation policy: %lu\n",
                     caml_allocation_policy);
  }

  /* Minor heap size comes last because it will trigger a minor collection
     (thus invalidating [v]) and it can raise [Out_of_memory]. */
  newminsize = norm_minsize (Bsize_wsize (Long_val (Field (v, 0))));
  if (newminsize != caml_minor_heap_size){
    caml_gc_message (0x20, "New minor heap size: %luk bytes\n",
                     newminsize/1024);
    caml_set_minor_heap_size (newminsize);
  }
  return Val_unit;
}
/* Resolve [name] against the Windows system search path, adding the
   ".exe" extension when needed.  Returns a caml_stat_alloc'd string
   the caller must free: the resolved full path, or a copy of [name]
   when SearchPath finds nothing.  The buffer is grown and the lookup
   retried until the result fits. */
CAMLexport char * caml_search_exe_in_path(char * name)
{
  char * buffer, * filepart;
  size_t bufsize;
  DWORD rc;

  bufsize = strlen(name) + 1;
  if (bufsize < 256) bufsize = 256;   /* reasonable first guess */
  for (;;) {
    buffer = caml_stat_alloc(bufsize);
    rc = SearchPath(NULL,     /* use system search path */
                    name,
                    ".exe",   /* add .exe extension if needed */
                    bufsize,
                    buffer,
                    &filepart);
    if (rc == 0) {
      /* Not found: fall back to the name as given. */
      caml_gc_message(0x100, "%s not found in search path\n",
                      (uintnat) name);
      caml_stat_free(buffer);
      return caml_strdup(name);
    }
    if (rc < bufsize)
      return buffer;          /* result fitted */
    /* Buffer too small: SearchPath reported the required length. */
    caml_stat_free(buffer);
    bufsize = rc + 1;
  }
}
/* Recovery path when the extern (marshaling) stack overflows:
   log, replay the trail, release the output buffers, then raise
   Out_of_memory.  Code is byte-identical; only documentation added. */
static void extern_stack_overflow(void)
{
  caml_gc_message (0x04, "Stack overflow in marshaling value\n", 0);
  extern_replay_trail();    /* presumably undoes temporary in-place
                               marks on the value graph -- see
                               extern_replay_trail */
  free_extern_output();
  caml_raise_out_of_memory();
}
/* Reentrant variant of extern_stack_overflow: same cleanup sequence,
   threading the runtime context [ctx] (introduced by the CAML_R
   parameter macro) through each call. */
static void extern_stack_overflow_r(CAML_R)
{
  caml_gc_message (0x04, "Stack overflow in marshaling value\n", 0);
  extern_replay_trail_r(ctx);
  free_extern_output_r(ctx);
  caml_raise_out_of_memory_r(ctx);
}
/* Older Windows variant: resolve [name] via the system search path,
   adding ".exe" when needed.  Returns a stat_alloc'd string: the full
   path found, or a copy of [name] on failure.  The buffer is regrown
   until SearchPath's result fits (when too small, SearchPath returns
   the required length, hence [pathlen = retcode + 1]). */
CAMLexport char * caml_search_exe_in_path(char * name)
{
  char * fullname, * filepart;
  DWORD pathlen, retcode;

  pathlen = strlen(name) + 1;
  if (pathlen < 256) pathlen = 256;
  while (1) {
    fullname = stat_alloc(pathlen);
    retcode = SearchPath(NULL,     /* use system search path */
                         name,
                         ".exe",   /* add .exe extension if needed */
                         pathlen,
                         fullname,
                         &filepart);
    if (retcode == 0) {
      /* Not found: keep the name as given (buffer is large enough). */
      caml_gc_message(0x100, "%s not found in search path\n",
                      (uintnat) name);
      strcpy(fullname, name);
      break;
    }
    if (retcode < pathlen) break;   /* result fitted */
    stat_free(fullname);
    pathlen = retcode + 1;
  }
  return fullname;
}
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color
   [caml_allocation_color(m)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error (page table full).
   See also: caml_compact_heap, which duplicates most of this function. */
int caml_add_to_heap (char *m)
{
  Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (caml_stat_heap_size + Chunk_size (m)) / 1024);

  /* Register block in page table */
  if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
    return -1;

  /* Chain this heap chunk. */
  {
    /* The chunk list is kept sorted by address: walk to the
       insertion point, then splice [m] in. */
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  /* Update the heap-size statistics (and the high-water mark). */
  caml_stat_heap_size += Chunk_size (m);
  if (caml_stat_heap_size > caml_stat_top_heap_size){
    caml_stat_top_heap_size = caml_stat_heap_size;
  }
  return 0;
}
/* Entry point of the standalone bytecode runtime: optionally announce
   debug mode, expand the command line on Windows, then hand control
   to caml_main.  caml_sys_exit does not return.
   Fix: [ocp], [cp] and [i] were declared outside the "#if 0" region
   that is their only use, producing unused-variable warnings in every
   DEBUG build; the declarations now live inside the disabled region. */
int main(int argc, char **argv)
{
#ifdef DEBUG
  caml_gc_message (-1, "### OCaml runtime: debug mode ###\n", 0);
#if 0
  /* Extra startup tracing, currently disabled. */
  {
    char *ocp;
    char *cp;
    int i;

    caml_gc_message (-1, "### command line:", 0);
    for (i = 0; i < argc; i++){
      caml_gc_message (-1, " %s", argv[i]);
    }
    caml_gc_message (-1, "\n", 0);
    ocp = getenv ("OCAMLRUNPARAM");
    caml_gc_message (-1, "### OCAMLRUNPARAM=%s\n", ocp == NULL ? "" : ocp);
    cp = getenv ("CAMLRUNPARAM");
    caml_gc_message (-1, "### CAMLRUNPARAM=%s\n", cp == NULL ? "" : cp);
    caml_gc_message (-1, "### working dir: %s\n", getcwd (NULL, 0));
  }
#endif
#endif
#ifdef _WIN32
  /* Expand wildcards and diversions in command line */
  caml_expand_command_line(&argc, &argv);
#endif
  caml_main(argv);
  caml_sys_exit(Val_int(0));
  return 0; /* not reached */
}
void caml_compact_heap_maybe (void) { /* Estimated free words in the heap: FW = fl_size_at_change + 3 * (caml_fl_cur_size - caml_fl_size_at_phase_change) FW = 3 * caml_fl_cur_size - 2 * caml_fl_size_at_phase_change Estimated live words: LW = caml_stat_heap_size - FW Estimated free percentage: FP = 100 * FW / LW We compact the heap if FP > caml_percent_max */ uintnat fw, fp; Assert (caml_gc_phase == Phase_idle); if (caml_percent_max >= 1000000) return; if (caml_stat_major_collections < 3 || caml_stat_heap_chunks < 3) return; fw = 3 * caml_fl_cur_size - 2 * caml_fl_size_at_phase_change; if (fw < 0) fw = caml_fl_cur_size; if (fw >= Wsize_bsize (caml_stat_heap_size)){ fp = 1000000; }else{ fp = 100 * fw / (Wsize_bsize (caml_stat_heap_size) - fw); if (fp > 1000000) fp = 1000000; } caml_gc_message (0x200, "FL size at phase change = %" ARCH_INTNAT_PRINTF_FORMAT "u\n", caml_fl_size_at_phase_change); caml_gc_message (0x200, "Estimated overhead = %" ARCH_INTNAT_PRINTF_FORMAT "u%%\n", fp); if (fp >= caml_percent_max){ caml_gc_message (0x200, "Automatic compaction triggered.\n", 0); caml_finish_major_cycle (); /* We just did a complete GC, so we can measure the overhead exactly. */ fw = caml_fl_cur_size; fp = 100 * fw / (Wsize_bsize (caml_stat_heap_size) - fw); caml_gc_message (0x200, "Measured overhead: %" ARCH_INTNAT_PRINTF_FORMAT "u%%\n", fp); caml_compact_heap (); } }
/* Primitive behind [Gc.major]: empty the minor heap, finish the
   current major cycle, check whether an automatic compaction is due,
   then run pending finalizers.  [v] must be unit. */
CAMLprim value caml_gc_major(value v)
{
  Assert (v == Val_unit);
  caml_gc_message (0x1, "Major GC cycle requested\n", 0);
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  test_and_compact ();
  caml_final_do_calls ();
  return Val_unit;
}
/* Common runtime startup: parse OCAMLRUNPARAM, initialize the runtime
   subsystems in order, resolve the executable name, then run the
   program via caml_start_program.  Returns the program's result, or
   Val_unit when caml_startup_aux declines the start or the program is
   terminated through caml_termination_hook.  [pooling] is passed to
   caml_startup_aux (forced on when cleanup_on_exit is set). */
value caml_startup_common(char_os **argv, int pooling)
{
  char_os * exe_name, * proc_self_exe;
  char tos;  /* its address approximates the current top of the C stack */

  CAML_INIT_DOMAIN_STATE;

  /* Determine options */
  caml_parse_ocamlrunparam();
#ifdef DEBUG
  caml_gc_message (-1, "### OCaml runtime: debug mode ###\n");
#endif
  if (caml_params->cleanup_on_exit)
    pooling = 1;
  if (!caml_startup_aux(pooling))
    return Val_unit;

#ifdef WITH_SPACETIME
  caml_spacetime_initialize();
#endif
  caml_init_ieee_floats();
#if defined(_MSC_VER) && __STDC_SECURE_LIB__ >= 200411L
  caml_install_invalid_parameter_handler();
#endif
  caml_init_custom_operations();
  caml_init_gc ();
  if (caml_params->backtrace_enabled_init)
    caml_record_backtrace(Val_int(1));

  /* Capture 16-byte aligned (ceil) system_stack_high */
  Caml_state->system_stack_high = (char*)((((uintnat)&tos + 16) >> 4) << 4);

  init_segments();
  caml_init_signals();
#ifdef _WIN32
  caml_win32_overflow_detection();
#endif
  caml_debugger_init (); /* force debugger.o stub to be linked */

  /* Resolve the executable name: prefer the OS-reported path, else
     search argv[0] on the executable search path. */
  exe_name = argv[0];
  if (exe_name == NULL) exe_name = _T("");
  proc_self_exe = caml_executable_name();
  if (proc_self_exe != NULL)
    exe_name = proc_self_exe;
  else
    exe_name = caml_search_exe_in_path(exe_name);
  caml_init_argv(exe_name, argv);

  /* Non-local exit point used for runtime termination. */
  if (sigsetjmp(caml_termination_jmpbuf.buf, 0)) {
    if (caml_termination_hook != NULL) caml_termination_hook(NULL);
    return Val_unit;
  }
  caml_init_main_stack();
  return caml_start_program(Caml_state->young_ptr);
}
/* Initialize the garbage collector from the given settings (all sizes
   in words, percentages as given): normalize each parameter, set the
   minor heap size and major-heap increment, record the overhead
   percentages, allocate the initial major heap, then log the resulting
   configuration. */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m)
{
  uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));

  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr));
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   caml_minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  caml_gc_message (0x20, "Initial heap increment: %luk bytes\n",
                   caml_major_heap_increment / 1024);
}
/* Set the interpreter stack limit (in words) to [new_max_size],
   clamped from below so it is never smaller than the stack space
   currently in use plus the overflow-detection threshold.  The log
   message is only emitted when the limit actually changes. */
void caml_change_max_stack_size (uintnat new_max_size)
{
  asize_t size = caml_stack_high - caml_extern_sp
                 + Stack_threshold / sizeof (value);

  if (new_max_size < size) new_max_size = size;
  if (new_max_size != caml_max_stack_size){
    caml_gc_message (0x08, "Changing stack limit to %luk bytes\n",
                     new_max_size * sizeof (value) / 1024);
  }
  caml_max_stack_size = new_max_size;
}
/* Allocate the interpreter stack and set up its bookkeeping pointers.
   The stack grows downward: [caml_extern_sp] and the trap pointers
   start at [caml_stack_high].  [initial_max_size] is the initial
   stack limit in words. */
void caml_init_stack (uintnat initial_max_size)
{
  caml_stack_low = (value *) caml_stat_alloc(Stack_size);
  caml_stack_high = caml_stack_low + Stack_size / sizeof (value);
  caml_stack_threshold = caml_stack_low + Stack_threshold / sizeof (value);
  caml_extern_sp = caml_stack_high;
  caml_trapsp = caml_stack_high;
  caml_trap_barrier = caml_stack_high + 1;   /* above any real trap */
  caml_max_stack_size = initial_max_size;
  caml_gc_message (0x08, "Initial stack limit: %luk bytes\n",
                   caml_max_stack_size / 1024 * sizeof (value));
}
/* Instrumented variant of the [Gc.major] primitive: same sequence as
   caml_gc_major elsewhere, wrapped in a CAML_INSTR timer recorded
   under "explicit/gc_major".  [v] must be unit. */
CAMLprim value caml_gc_major(value v)
{
  CAML_INSTR_SETUP (tmr, "");
  Assert (v == Val_unit);
  caml_gc_message (0x1, "Major GC cycle requested\n", 0);
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  test_and_compact ();
  caml_final_do_calls ();
  CAML_INSTR_TIME (tmr, "explicit/gc_major");
  return Val_unit;
}
/* Primitive behind [Gc.compact]: run a full major collection and its
   finalizers, then a second full collection immediately followed by
   the compaction, then the finalizers again.  [v] must be unit. */
CAMLprim value caml_gc_compaction(value v)
{
  Assert (v == Val_unit);
  caml_gc_message (0x10, "Heap compaction requested\n", 0);
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  caml_final_do_calls ();
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  caml_compact_heap ();
  caml_final_do_calls ();
  return Val_unit;
}
/* Initialize the garbage collector from the given settings: set up the
   page table (sized for the minor plus initial major heap, aborting on
   failure), normalize each parameter, set the minor heap size and
   major-heap increment, record the overhead percentages, allocate the
   initial major heap, then log the resulting configuration. */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m)
{
  uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));

  if (caml_page_table_initialize(Bsize_wsize(minor_size) + major_heap_size)){
    caml_fatal_error ("OCaml runtime error: cannot initialize page table\n");
  }
  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr));
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   caml_minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  caml_gc_message (0x20, "Initial heap increment: %luk bytes\n",
                   caml_major_heap_increment / 1024);
  caml_gc_message (0x20, "Initial allocation policy: %d\n",
                   caml_allocation_policy);
}
/* Dynlink primitive: open the shared library [filename] with the given
   [mode] and wrap the resulting handle in a one-field Abstract block.
   Raises Failure (with the dlerror message) when loading fails. */
CAMLprim value caml_dynlink_open_lib(value mode, value filename)
{
  void * handle;
  value result;

  caml_gc_message(0x100, "Opening shared library %s\n",
                  (uintnat) String_val(filename));
  handle = caml_dlopen(String_val(filename), Int_val(mode), 1);
  if (handle == NULL) caml_failwith(caml_dlerror());
  result = caml_alloc_small(1, Abstract_tag);
  Handle_val(result) = handle;
  return result;
}
char * caml_search_in_path(struct ext_table * path, char * name) { char * p, * dir, * fullname; int i; struct stat st; for (p = name; *p != 0; p++) { if (*p == '/' || *p == '\\') goto not_found; } for (i = 0; i < path->size; i++) { dir = path->contents[i]; if (dir[0] == 0) continue; /* not sure what empty path components mean under Windows */ fullname = caml_strconcat(3, dir, "\\", name); caml_gc_message(0x100, "Searching %s\n", (uintnat) fullname); if (stat(fullname, &st) == 0 && S_ISREG(st.st_mode)) return fullname; caml_stat_free(fullname); } not_found: caml_gc_message(0x100, "%s not found in search path\n", (uintnat) name); return caml_strdup(name); }
/* Open the given shared library and add it to shared_libs.
   Abort on error (caml_fatal_error_arg2 with the dlerror reason).
   The resolved path is freed once the handle is stored. */
static void open_shared_lib(char * name)
{
  char * realname;
  void * handle;

  realname = caml_search_dll_in_path(&caml_shared_libs_path, name);
  caml_gc_message(0x100, "Loading shared library %s\n", (uintnat) realname);
  handle = caml_dlopen(realname, 1, 1);
  if (handle == NULL)
    caml_fatal_error_arg2("Fatal error: cannot load shared library %s\n",
                          name,
                          "Reason: %s\n", caml_dlerror());
  caml_ext_table_add(&shared_libs, handle);
  caml_stat_free(realname);  /* only the handle is kept */
}
/* Instrumented variant of the [Gc.compact] primitive: full major
   collection plus finalizers, then a second collection, compaction,
   and finalizers again, timed under "explicit/gc_compact". */
CAMLprim value caml_gc_compaction(value v)
{
  CAML_INSTR_SETUP (tmr, "");
  Assert (v == Val_unit);
  caml_gc_message (0x10, "Heap compaction requested\n", 0);
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  caml_final_do_calls ();
  caml_empty_minor_heap ();
  caml_finish_major_cycle ();
  caml_compact_heap ();
  caml_final_do_calls ();
  CAML_INSTR_TIME (tmr, "explicit/gc_compact");
  return Val_unit;
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through
   their field 0); the last block of the chain is pointed by field 1
   of the first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory. */
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by caml_percent_free percent of the request, then
     round up to a whole heap chunk. */
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  /* Carve the chunk into blue blocks of at most Max_wosize words,
     chaining each new block through field 0 of the previous one and
     keeping field 1 of the first block pointing at the last. */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    /* Last block takes whatever is left. */
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;   /* end of the chain */
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    /* A single leftover word becomes a zero-size white fragment. */
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}