/* Normalize a requested heap increment [i] (in bytes): raise it to at
   least one minimum-size chunk, then round up to a whole number of pages. */
uintnat caml_normalize_heap_increment (uintnat i)
{
  uintnat bytes = i;
  uintnat min_bytes = Bsize_wsize (Heap_chunk_min);

  if (bytes < min_bytes) bytes = min_bytes;
  /* Round up to the next multiple of Page_size. */
  return ((bytes + Page_size - 1) >> Page_log) << Page_log;
}
/* Initialize the garbage collector.
   [minor_size] and [major_size] are sizes in words; [major_incr] is the
   heap increment in words; [percent_fr] and [percent_m] are percentages.
   The page table must be set up before any heap memory is allocated,
   hence the ordering below.  Fatal error if it cannot be initialized. */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m)
{
  uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));

  /* Size the page table for the combined minor + major heaps. */
  if (caml_page_table_initialize(Bsize_wsize(minor_size) + major_heap_size)){
    caml_fatal_error ("OCaml runtime error: cannot initialize page table\n");
  }
  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr));
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);

  /* Report the effective (normalized) settings. */
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   caml_minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  caml_gc_message (0x20, "Initial heap increment: %luk bytes\n",
                   caml_major_heap_increment / 1024);
  caml_gc_message (0x20, "Initial allocation policy: %d\n",
                   caml_allocation_policy);
}
/* Compare two OCaml strings, ordering shorter strings first; strings of
   equal length are ordered by [memcmp] on their bytes.
   Returns -1, 0 or 1 as an OCaml integer. */
CAMLprim value caml_string_length_based_compare(value s1, value s2)
{
  mlsize_t n1, n2, last;
  int c;

  if (s1 == s2) return Val_int(0);

  /* Recover the exact byte length of each string: the final byte of the
     block stores the amount of padding after the data. */
  last = Bsize_wsize(Wosize_val(s1)) - 1;
  n1 = last - Byte(s1, last);
  last = Bsize_wsize(Wosize_val(s2)) - 1;
  n2 = last - Byte(s2, last);

  if (n1 < n2) return Val_long_clang(-1);
  if (n1 > n2) return Val_long_clang(1);

  c = memcmp(String_val(s1), String_val(s2), n1);
  if (c < 0) return Val_long_clang(-1);
  if (c > 0) return Val_long_clang(1);
  return Val_long_clang(0);
}
/* Replace the minor heap with a fresh block of [size] bytes.
   [size] must be a multiple of [sizeof (value)] within the legal range.
   Triggers a minor collection first so no live minor data is lost.
   Raises [Out_of_memory] on allocation or page-table failure. */
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;
  void *new_heap_base;

  Assert (size >= Bsize_wsize(Minor_heap_min));
  Assert (size <= Bsize_wsize(Minor_heap_max));
  Assert (size % sizeof (value) == 0);

  /* Empty the current minor heap before discarding it. */
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);

  new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  if (new_heap == NULL) caml_raise_out_of_memory();
  if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0){
    /* Fix: the original raised without releasing the freshly allocated
       block, leaking it on page-table failure. */
    free (new_heap_base);
    caml_raise_out_of_memory();
  }

  /* Unregister and free the old minor heap, if any. */
  if (caml_young_start != NULL){
    caml_page_table_remove(In_young, caml_young_start, caml_young_end);
    free (caml_young_base);
  }
  caml_young_base = new_heap_base;
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;   /* allocation proceeds downward */
  caml_minor_heap_size = size;

  /* The remembered sets refer to the old minor heap; reset them. */
  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
static pool* pool_acquire(struct caml_heap_state* local) { pool* r; if (local->num_free_pools > 0) { r = local->free_pools; local->free_pools = r->next; local->num_free_pools--; } else { caml_plat_lock(&pool_freelist.lock); if (!pool_freelist.free) { void* mem = caml_mem_map(Bsize_wsize(POOL_WSIZE) * POOLS_PER_ALLOCATION, Bsize_wsize(POOL_WSIZE), 0 /* allocate */); int i; if (mem) { pool_freelist.free = mem; for (i=1; i<POOLS_PER_ALLOCATION; i++) { r = (pool*)(((uintnat)mem) + ((uintnat)i) * Bsize_wsize(POOL_WSIZE)); r->next = pool_freelist.free; r->owner = 0; pool_freelist.free = r; } } } r = pool_freelist.free; if (r) pool_freelist.free = r->next; caml_plat_unlock(&pool_freelist.lock); } if (r) Assert (r->owner == 0); return r; }
/* Primitive implementing [Gc.set]: update the GC parameters from the
   fields of the record [v].  The minor heap resize comes last because
   it triggers a minor collection (invalidating [v]) and may raise
   [Out_of_memory]. */
CAMLprim value caml_gc_set(value v)
{
  uintnat newpf, newpm;
  asize_t newheapincr;
  asize_t newminsize;
  uintnat oldpolicy;

  caml_verb_gc = Long_val (Field (v, 3));

#ifndef NATIVE_CODE
  caml_change_max_stack_size (Long_val (Field (v, 5)));
#endif

  newpf = norm_pfree (Long_val (Field (v, 2)));
  if (newpf != caml_percent_free) {
    caml_percent_free = newpf;
    /* Fix: caml_percent_free is uintnat; "%d" mismatched on LP64.
       Use %lu as the init-time messages do. */
    caml_gc_message (0x20, "New space overhead: %lu%%\n", caml_percent_free);
  }

  newpm = norm_pmax (Long_val (Field (v, 4)));
  if (newpm != caml_percent_max) {
    caml_percent_max = newpm;
    caml_gc_message (0x20, "New max overhead: %lu%%\n", caml_percent_max);
  }

  newheapincr = Bsize_wsize (norm_heapincr (Long_val (Field (v, 1))));
  if (newheapincr != caml_major_heap_increment) {
    caml_major_heap_increment = newheapincr;
    caml_gc_message (0x20, "New heap increment size: %luk bytes\n",
                     caml_major_heap_increment/1024);
  }

  oldpolicy = caml_allocation_policy;
  caml_set_allocation_policy (Long_val (Field (v, 6)));
  if (oldpolicy != caml_allocation_policy) {
    caml_gc_message (0x20, "New allocation policy: %d\n",
                     caml_allocation_policy);
  }

  /* Minor heap size comes last because it will trigger a minor
     collection (thus invalidating [v]) and it can raise [Out_of_memory]. */
  newminsize = norm_minsize (Bsize_wsize (Long_val (Field (v, 0))));
  if (newminsize != caml_minor_heap_size) {
    caml_gc_message (0x20, "New minor heap size: %luk bytes\n",
                     newminsize/1024);
    caml_set_minor_heap_size (newminsize);
  }
  return Val_unit;
}
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(m)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.

   See also: caml_compact_heap, which duplicates most of this function.
*/
int caml_add_to_heap (char *m)
{
  Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (Bsize_wsize (caml_stat_heap_wsz) + Chunk_size (m)) / 1024);

  /* Register block in page table */
  if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
    return -1;

  /* Chain this heap chunk. */
  {
    /* The chunk list is kept sorted by increasing address: walk to the
       first chunk above [m] and splice [m] in before it. */
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  /* Update heap-size statistics (and the high-water mark). */
  caml_stat_heap_wsz += Wsize_bsize (Chunk_size (m));
  if (caml_stat_heap_wsz > caml_stat_top_heap_wsz){
    caml_stat_top_heap_wsz = caml_stat_heap_wsz;
  }
  return 0;
}
/* Format a region of the bigarray [memv] as a well-formed OCaml string
   block: [off] is the byte offset of the block header inside the
   bigarray (must be value-aligned), [lenv] the string length in bytes.
   Writes the header, the zero-padding of the last word, and the final
   padding-count byte that encodes the exact length. */
CAMLprim value netsys_init_string(value memv, value offv, value lenv)
{
  struct caml_bigarray *b = Bigarray_val(memv);
  intnat off = Long_val(offv);
  intnat len = Long_val(lenv);
  value *m;
  char *m_b;
  mlsize_t wosize;
  mlsize_t offset_index;

  /* The block must start on a word boundary. */
#ifdef ARCH_SIXTYFOUR
  if (off % 8 != 0)
    invalid_argument("Netsys_mem.init_string");
#else
  if (off % 4 != 0)
    invalid_argument("Netsys_mem.init_string");
#endif

  m = (value *) (((char *) b->data) + off);
  m_b = (char *) m;

  wosize = (len + sizeof (value)) / sizeof (value);  /* >= 1 */

  /* NOTE(review): hand-built header assumes the standard layout with the
     size in the bits above bit 10 and white color bits = 0 — confirm
     against the runtime's Make_header for this OCaml version. */
  m[0] = /* Make_header (wosize, String_tag, Caml_white) */
    (value) (((header_t) wosize << 10) + String_tag);
  /* Zero the last data word so the padding bytes start out as zeros
     (m[0] is the header, data words are m[1..wosize]). */
  m[wosize] = 0;
  /* Last byte of the data records the padding length. */
  offset_index = Bsize_wsize (wosize) - 1;
  m_b[offset_index + sizeof(value)] = offset_index - len;

  return Val_unit;
}
/* Allocate a block of [wosize] words with tag [tag] from the shared
   (major) heap of domain [local].  Small requests are served from
   size-class pools, larger ones by the large-object allocator.
   Returns a pointer to the header, or NULL on allocation failure.
   Pinned blocks get the NOT_MARKABLE colour; others start in
   global.MARKED — presumably "allocate black"; confirm against the
   GC colour protocol of this runtime version. */
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  uintnat colour;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);

  if (whsize <= SIZECLASS_MAX) {
    /* Small block: round up to its size class and take a pool slot. */
    sizeclass sz = sizeclass_wsize[whsize];
    Assert(wsize_sizeclass[sz] >= whsize);
    p = pool_allocate(local, sz);
    if (!p) return 0;
    /* Account live words and the rounding waste of the size class. */
    struct heap_stats* s = &local->stats;
    s->pool_live_blocks++;
    s->pool_live_words += whsize;
    s->pool_frag_words += wsize_sizeclass[sz] - whsize;
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
    if (!p) return 0;
  }
  colour = pinned ? NOT_MARKABLE : global.MARKED;
  Hd_hp (p) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    /* Poison the fields so use of uninitialized data is visible. */
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
/* [allocate_block] is called by [caml_fl_allocate].  Given a suitable free
   block and the desired size, it allocates a new block from the free
   block.  There are three cases:
   0. The free block has the desired size.  Detach the block from the
      free-list and return it.
   1. The free block is 1 word longer than the desired size.  Detach
      the block from the free list.  The remaining word cannot be linked:
      turn it into an empty block (header only), and return the rest.
   2. The free block is big enough.  Split it in two and return the right
      block.
   In all cases, the allocated block is right-justified in the free block:
   it is located in the high-address words of the free block.  This way,
   the linking of the free-list does not change in case 2.
*/
static char *allocate_block (mlsize_t wh_sz, int flpi, char *prev, char *cur)
{
  header_t h = Hd_bp (cur);
  Assert (Whsize_hd (h) >= wh_sz);
  if (Wosize_hd (h) < wh_sz + 1){                     /* Cases 0 and 1. */
    caml_fl_cur_size -= Whsize_hd (h);
    /* Unlink [cur] from the free list. */
    Next (prev) = Next (cur);
    Assert (Is_in_heap (Next (prev)) || Next (prev) == NULL);
    if (caml_fl_merge == cur) caml_fl_merge = prev;
#ifdef DEBUG
    fl_last = NULL;
#endif
    /* In case 1, the following creates the empty block correctly.
       In case 0, it gives an invalid header to the block.  The function
       calling [caml_fl_allocate] will overwrite it. */
    Hd_op (cur) = Make_header (0, 0, Caml_white);
    if (policy == Policy_first_fit){
      /* Keep the first-fit pointer cache [flp] consistent with the
         unlinking of [cur]. */
      if (flpi + 1 < flp_size && flp[flpi + 1] == cur){
        flp[flpi + 1] = prev;
      }else if (flpi == flp_size - 1){
        beyond = (prev == Fl_head) ? NULL : prev;
        -- flp_size;
      }
    }
  }else{                                              /* Case 2. */
    caml_fl_cur_size -= wh_sz;
    /* Shrink [cur]; the allocated words are carved off its high end. */
    Hd_op (cur) = Make_header (Wosize_hd (h) - wh_sz, 0, Caml_blue);
  }
  if (policy == Policy_next_fit) fl_prev = prev;
  /* Return the right-justified sub-block. */
  return cur + Bosize_hd (h) - Bsize_wsize (wh_sz);
}
/* Primitive behind [String.create]: allocate a string of [len] bytes.
   Fails if [len] exceeds the maximum representable string length
   (Max_wosize words minus the mandatory padding byte). */
CAMLprim value caml_create_string(value len)
{
  mlsize_t size = Long_val(len);
  mlsize_t max_len = Bsize_wsize (Max_wosize) - 1;

  if (size > max_len)
    caml_invalid_argument("String.create");
  return caml_alloc_string(size);
}
/* Compact the major heap, possibly twice: a second pass is needed when
   the first one leaves a single oversized chunk (see comment below). */
void caml_compact_heap (void)
{
  uintnat target_size, live;

  do_compaction ();
  /* Compaction may fail to shrink the heap to a reasonable size
     because it deals in complete chunks: if a very large chunk
     is at the beginning of the heap, everything gets moved to
     it and it is not freed.

     In that case, we allocate a new chunk of the desired heap
     size, chain it at the beginning of the heap (thus pretending
     its address is smaller), and launch a second compaction.
     This will move all data to this new chunk and free the
     very large chunk.

     See PR#5389
  */
  /* We compute:
     freewords = caml_fl_cur_size          (exact)
     heapsize = caml_heap_size             (exact)
     live = heap_size - freewords
     target_size = live * (1 + caml_percent_free / 100)
                 = live / 100 * (100 + caml_percent_free)
     We add 1 to live/100 to make sure it isn't 0.

     We recompact if target_size < heap_size / 2
  */
  live = caml_stat_heap_size - Bsize_wsize (caml_fl_cur_size);
  target_size = (live / 100 + 1) * (100 + caml_percent_free);
  target_size = caml_round_heap_chunk_size (target_size);
  if (target_size < caml_stat_heap_size / 2){
    char *chunk;

    /* round it up to a page size */
    chunk = caml_alloc_for_heap (target_size);
    if (chunk == NULL) return;   /* best-effort: keep the current heap */
    /* Pre-fill the new chunk with free blocks so it is a valid heap. */
    caml_make_free_blocks ((value *) chunk,
                           Wsize_bsize (Chunk_size (chunk)), 0);
    if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0){
      caml_free_for_heap (chunk);
      return;
    }
    /* Chain the new chunk first so compaction moves data into it. */
    Chunk_next (chunk) = caml_heap_start;
    caml_heap_start = chunk;
    ++ caml_stat_heap_chunks;
    caml_stat_heap_size += Chunk_size (chunk);
    if (caml_stat_heap_size > caml_stat_top_heap_size){
      caml_stat_top_heap_size = caml_stat_heap_size;
    }
    do_compaction ();
    Assert (caml_stat_heap_chunks == 1);
    Assert (Chunk_next (caml_heap_start) == NULL);
  }
}
/* Initialize the garbage collector (variant without page-table setup).
   [minor_size] and [major_size] are in words; [major_incr] is a heap
   increment in words; [percent_fr] and [percent_m] are percentages. */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m)
{
  /* Note: the initial major size is clamped with [norm_heapincr], the
     same normalization used for the increment. */
  uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));

  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr));
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);

  /* Report the effective (normalized) settings. */
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   caml_minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  caml_gc_message (0x20, "Initial heap increment: %luk bytes\n",
                   caml_major_heap_increment / 1024);
}
/* Re-apply the scanning action [f] to every marked object in pool [r].
   Slots with a zero header (free objects) are skipped. */
void caml_redarken_pool(struct pool* r, scanning_action f, void* fdata)
{
  mlsize_t stride = wsize_sizeclass[r->sz];
  value* obj = (value*)((char*)r + POOL_HEADER_SZ);
  value* limit = (value*)((char*)r + Bsize_wsize(POOL_WSIZE));

  for (; obj + stride <= limit; obj += stride) {
    header_t hd = obj[0];
    if (hd == 0) continue;                 /* free slot */
    if (Has_status_hd(hd, global.MARKED))
      f(fdata, Val_hp(obj), 0);
  }
}
/* [minor_size] and [major_size] are numbers of words
   [major_incr] is either a percentage or a number of words */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m, uintnat window)
{
  uintnat major_heap_size =
    Bsize_wsize (caml_normalize_heap_increment (major_size));

  CAML_INSTR_INIT ();
  /* Heap backing store and page table must exist before any heap use. */
  if (caml_init_alloc_for_heap () != 0){
    caml_fatal_error ("cannot initialize heap: mmap failed\n");
  }
  if (caml_page_table_initialize(Bsize_wsize(minor_size) + major_heap_size)){
    caml_fatal_error ("OCaml runtime error: cannot initialize page table\n");
  }
  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  /* Stored raw: values > 1000 are words, smaller ones are percentages
     (see the message logic below). */
  caml_major_heap_increment = major_incr;
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);
  caml_major_window = norm_window (window);

  /* Report the effective (normalized) settings. */
  caml_gc_message (0x20, "Initial minor heap size: %luk words\n",
                   caml_minor_heap_wsz / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  if (caml_major_heap_increment > 1000){
    caml_gc_message (0x20, "Initial heap increment: %luk words\n",
                     caml_major_heap_increment / 1024);
  }else{
    caml_gc_message (0x20, "Initial heap increment: %lu%%\n",
                     caml_major_heap_increment);
  }
  caml_gc_message (0x20, "Initial allocation policy: %d\n",
                   caml_allocation_policy);
  caml_gc_message (0x20, "Initial smoothing window: %d\n",
                   caml_major_window);
}
/* Normalize a requested minor heap size (in words) into an allocatable
   byte size: at least Minor_heap_min, rounded up to whole pages, and
   capped so it fits the alignment window with two guard pages spare. */
asize_t caml_norm_minor_heap_size (intnat wsize)
{
  asize_t page_size = caml_mem_round_up_pages(1);
  asize_t bytes;
  asize_t limit;

  if (wsize < Minor_heap_min)
    wsize = Minor_heap_min;
  bytes = caml_mem_round_up_pages(Bsize_wsize (wsize));

  Assert(page_size * 2 < (1 << Minor_heap_align_bits));
  limit = (1 << Minor_heap_align_bits) - page_size * 2;
  return bytes > limit ? limit : bytes;
}
/* Allocate an OCaml string of [len] bytes, always in the major heap,
   and fill in the length-encoding padding byte. */
static value netsys_alloc_string_shr(mlsize_t len)
{
  mlsize_t wosize = (len + sizeof (value)) / sizeof (value);
  mlsize_t pad_index;
  value s;

  s = caml_alloc_shr (wosize, String_tag);
  s = caml_check_urgent_gc (s);

  /* Zero the last word, then record the padding count in the final
     byte so the exact length can be recovered. */
  Field (s, wosize - 1) = 0;
  pad_index = Bsize_wsize (wosize) - 1;
  Byte (s, pad_index) = pad_index - len;
  return s;
}
/* Initialize the garbage collector (debug-banner variant).
   [minor_size] and [major_size] are in words; [major_incr] is a heap
   increment in words; [percent_fr] and [percent_m] are percentages. */
void caml_init_gc (uintnat minor_size, uintnat major_size,
                   uintnat major_incr, uintnat percent_fr,
                   uintnat percent_m)
{
  uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));

#ifdef DEBUG
  /* Banner so debug builds are unmistakable in logs. */
  caml_gc_message (-1, "### O'Caml runtime: debug mode ###\n", 0);
#endif

  caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size)));
  caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr));
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);

  /* Report the effective (normalized) settings. */
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   caml_minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %lu%%\n",
                   caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %lu%%\n",
                   caml_percent_max);
  caml_gc_message (0x20, "Initial heap increment: %luk bytes\n",
                   caml_major_heap_increment / 1024);
}
/* Allocate an OCaml string of [len] bytes: minor heap for small sizes,
   major heap otherwise.  The last byte of the block encodes the padding
   so the exact length can be recovered from the block size. */
CAMLexport value caml_alloc_string (mlsize_t len)
{
  mlsize_t wosize = (len + sizeof (value)) / sizeof (value);
  mlsize_t pad_index;
  value result;

  if (wosize <= Max_young_wosize){
    Alloc_small (result, wosize, String_tag);
  }else{
    result = caml_alloc_shr (wosize, String_tag);
    result = caml_check_urgent_gc (result);
  }
  /* Zero the final word, then store the padding count in the last byte. */
  Field (result, wosize - 1) = 0;
  pad_index = Bsize_wsize (wosize) - 1;
  Byte (result, pad_index) = pad_index - len;
  return result;
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory. */
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by the space-overhead percentage so the GC is not
     immediately triggered again. */
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  /* Carve the chunk into maximal blue blocks, chaining each new block
     through field 0 of the previous and recording the tail in field 1
     of the first. */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    /* Final block covers the remaining words (minus its header). */
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    /* A single leftover word becomes a zero-size fragment. */
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
void caml_array_bound_error(void) { if (! array_bound_error_bucket_inited) { mlsize_t wosize = (BOUND_MSG_LEN + sizeof(value)) / sizeof(value); mlsize_t offset_index = Bsize_wsize(wosize) - 1; array_bound_error_msg.hdr = Make_header(wosize, String_tag, Caml_white); array_bound_error_msg.data[offset_index] = offset_index - BOUND_MSG_LEN; array_bound_error_bucket.hdr = Make_header(2, 0, Caml_white); array_bound_error_bucket.exn = (value) caml_exn_Invalid_argument; array_bound_error_bucket.arg = (value) array_bound_error_msg.data; array_bound_error_bucket_inited = 1; caml_page_table_add(In_static_data, &array_bound_error_msg, &array_bound_error_msg + 1); array_bound_error_bucket_inited = 1; } caml_raise((value) &array_bound_error_bucket.exn); }
/* Given the header address [curr_hp] of an object in the minor heap of
   [domain_state], return the header address of the following object.
   A zero header means the object was forwarded during promotion; its
   size is then read from the promoted copy. */
static value next_minor_block(caml_domain_state* domain_state, value curr_hp)
{
  header_t hd;
  value v;
  mlsize_t wosize;

  CAMLassert ((value)domain_state->young_ptr <= curr_hp);
  CAMLassert (curr_hp < (value)domain_state->young_end);

  hd = Hd_hp(curr_hp);
  v = Val_hp(curr_hp);
  if (hd == 0)
    /* Forwarded object: field 0 points at the promoted version. */
    v = Op_val(v)[0];
  CAMLassert (Is_block(v) && Hd_val(v) != 0 && Tag_val(v) != Infix_tag);
  wosize = Wosize_val(v);
  CAMLassert (wosize <= Max_young_wosize);
  return curr_hp + Bsize_wsize(Whsize_wosize(wosize));
}
/* Find (or create) a pool of size class [sz] with at least one free
   slot in the heap of [local].  Tries, in order: an available pool,
   sweeping unswept pools, and finally acquiring a brand-new pool.
   Returns NULL only if a new pool cannot be allocated. */
static pool* pool_find(struct caml_heap_state* local, sizeclass sz)
{
  pool* r;

  /* Hopefully we have a pool we can use directly */
  r = local->avail_pools[sz];
  if (r) return r;

  /* Otherwise, try to sweep until we find one */
  while (!local->avail_pools[sz] &&
         pool_sweep(local, &local->unswept_avail_pools[sz], sz));
  while (!local->avail_pools[sz] &&
         pool_sweep(local, &local->unswept_full_pools[sz], sz));

  r = local->avail_pools[sz];
  if (r) return r;

  /* Failing that, we need to allocate a new pool */
  r = pool_acquire(local);
  if (!r) return 0; /* if we can't allocate, give up */

  /* Account the pool's words and its fixed overhead (header + the
     words wasted by the size-class rounding). */
  local->stats.pool_words += POOL_WSIZE;
  if (local->stats.pool_words > local->stats.pool_max_words)
    local->stats.pool_max_words = local->stats.pool_words;
  local->stats.pool_frag_words += POOL_HEADER_WSIZE + wastage_sizeclass[sz];

  /* Having allocated a new pool, set it up for size sz */
  local->avail_pools[sz] = r;
  r->next = 0;
  r->owner = local->owner;
  r->next_obj = 0;
  r->sz = sz;

  /* Thread every slot of the pool onto the pool's free-object list;
     a zero header marks a slot as free. */
  mlsize_t wh = wsize_sizeclass[sz];
  value* p = (value*)((char*)r + POOL_HEADER_SZ);
  value* end = (value*)((char*)r + Bsize_wsize(POOL_WSIZE));
  while (p + wh <= end) {
    p[0] = 0; /* zero header indicates free object */
    p[1] = (value)r->next_obj;
    r->next_obj = p;
    p += wh;
  }

  return r;
}
/* Allocate a block of [wosize] words with tag [tag], choosing minor or
   major heap by size.  Scannable blocks (tag < No_scan_tag) have all
   fields zeroed so the GC never sees uninitialized values. */
CAMLexport value caml_alloc (mlsize_t wosize, tag_t tag)
{
  value block;
  mlsize_t i;

  Assert (tag < 256);
  Assert (tag != Infix_tag);

  if (wosize == 0)
    return Atom (tag);   /* zero-size blocks are preallocated atoms */

  if (wosize <= Max_young_wosize){
    Alloc_small (block, wosize, tag);
    if (tag < No_scan_tag){
      for (i = 0; i < wosize; i++)
        Field (block, i) = 0;
    }
  }else{
    block = caml_alloc_shr (wosize, tag);
    if (tag < No_scan_tag)
      memset (Bp_val (block), 0, Bsize_wsize (wosize));
    block = caml_check_urgent_gc (block);
  }
  return block;
}
/* Allocate [wosize] words with tag [tag] from the shared heap of
   [local]: pool allocation for small sizes, large allocator otherwise.
   Returns a pointer to the header, or NULL on failure.  Pinned objects
   get the NOT_MARKABLE colour; the rest start in global.UNMARKED. */
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  uintnat colour;
  value* hp;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);

  hp = (whsize <= SIZECLASS_MAX)
     ? pool_allocate(local, sizeclass_wsize[whsize])
     : large_allocate(local, Bsize_wsize(whsize));
  if (!hp) return 0;

  colour = pinned ? NOT_MARKABLE : global.UNMARKED;
  Hd_hp (hp) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    /* Poison the fields so use of uninitialized data is visible. */
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(hp))[i] = Debug_free_major;
    }
  }
#endif
  return hp;
}
/* Walk the [size] words starting at [start] (a heap image whose fields
   were serialized as relative offsets), rewriting every block header to
   carry [color] and relocating every scannable field to an absolute
   pointer.
   NOTE(review): field encoding inferred from the assertions below —
   low two bits 0 = offset into the image, 2 = encoded atom number,
   1 or 3 = tagged integer; confirm against the writer of this format. */
void adjust_pointers(value * start, mlsize_t size, color_t color)
{
  value * p, * q;
  mlsize_t sz;
  header_t hd;
  tag_t tag;
  value v;
  mlsize_t bosize;

  p = start;
  q = p + size;
  bosize = Bsize_wsize(size);
  while (p < q) {
    hd = *p;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    *p++ = Make_header(sz, tag, color);  /* recolor the header in place */
    if (tag >= No_scan_tag)
      p += sz;                           /* opaque data: skip the fields */
    else
      for( ; sz > 0; sz--, p++) {
        v = *p;
        switch(v & 3) {
        case 0:          /* 0 -> a block represented by its offset */
          assert(v >= 0 && v <= bosize && (v & 3) == 0);
          *p = (value) ((byteoffset_t) start + v);
          break;
        case 2:          /* 2 -> an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *p = Atom(v);
          break;
        default:         /* 1 or 3 -> an integer */
          break;
        }
      }
  }
}
/* [allocate_block] is called by [caml_fl_allocate].  Given a suitable free
   block and the desired size, it allocates a new block from the free
   block.  There are three cases:
   0. The free block has the desired size.  Detach the block from the
      free-list and return it.
   1. The free block is 1 word longer than the desired size.  Detach
      the block from the free list.  The remaining word cannot be linked:
      turn it into an empty block (header only), and return the rest.
   2. The free block is big enough.  Split it in two and return the right
      block.
   In all cases, the allocated block is right-justified in the free block:
   it is located in the high-address words of the free block.  This way,
   the linking of the free-list does not change in case 2.
*/
static char *allocate_block (mlsize_t wh_sz, char *prev, char *cur)
{
  header_t h = Hd_bp (cur);
  Assert (Whsize_hd (h) >= wh_sz);
  if (Wosize_hd (h) < wh_sz + 1){                     /* Cases 0 and 1. */
    caml_fl_cur_size -= Whsize_hd (h);
    /* Unlink [cur] from the free list. */
    Next (prev) = Next (cur);
    Assert (Is_in_heap (Next (prev)) || Next (prev) == NULL);
    if (caml_fl_merge == cur) caml_fl_merge = prev;
#ifdef DEBUG
    fl_last = NULL;
#endif
    /* In case 1, the following creates the empty block correctly.
       In case 0, it gives an invalid header to the block.  The function
       calling [caml_fl_allocate] will overwrite it. */
    Hd_op (cur) = Make_header (0, 0, Caml_white);
  }else{                                              /* Case 2. */
    caml_fl_cur_size -= wh_sz;
    /* Shrink [cur]; the allocated words are carved off its high end. */
    Hd_op (cur) = Make_header (Wosize_hd (h) - wh_sz, 0, Caml_blue);
  }
  /* Next-fit: resume searching from here on the next allocation. */
  fl_prev = prev;
  /* Return the right-justified sub-block. */
  return cur + Bosize_hd (h) - Bsize_wsize (wh_sz);
}
/* [caml_fl_merge_block] returns the head pointer of the next block
   after [bp], because merging blocks may change the size of [bp]. */
char *caml_fl_merge_block (char *bp)
{
  char *prev, *cur, *adj;
  header_t hd = Hd_bp (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_size += Whsize_hd (hd);
#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == NULL);

  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_bp (bp);
    /* Only merge if the combined size is still representable. */
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = last_fragment;
      Hd_bp (bp) = hd;
      caml_fl_cur_size += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = bp + Bosize_hd (hd);
  if (adj == Hp_bp (cur)){
    char *next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_bp (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_bp (bp) = hd;
      adj = bp + Bosize_hd (hd);
#ifdef DEBUG
      fl_last = NULL;
      Next (cur) = (char *) Debug_free_major;
      Hd_bp (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }

  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_bp (prev);
  if (prev + Bsize_wsize (prev_wosz) == Hp_bp (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    Hd_bp (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0,Caml_blue);
#ifdef DEBUG
    Hd_bp (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    /* [bp] becomes a normal (blue) free-list block. */
    Hd_bp (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for
       eventual merging with the next block. */
    last_fragment = bp;
    caml_fl_cur_size -= Whsize_wosize (0);
  }
  return adj;
}
/* Primitive implementing [Gc.set]: update the GC parameters from the
   fields of the record [v].  Field 7 (the smoothing window) only
   exists from OCaml 4.03, hence the arity check.  The minor heap
   resize comes last because it triggers a minor collection
   (invalidating [v]) and may raise [Out_of_memory]. */
CAMLprim value caml_gc_set(value v)
{
  uintnat newpf, newpm;
  asize_t newheapincr;
  asize_t newminwsz;
  uintnat oldpolicy;
  CAML_INSTR_SETUP (tmr, "");

  caml_verb_gc = Long_val (Field (v, 3));

#ifndef NATIVE_CODE
  caml_change_max_stack_size (Long_val (Field (v, 5)));
#endif

  newpf = norm_pfree (Long_val (Field (v, 2)));
  if (newpf != caml_percent_free){
    caml_percent_free = newpf;
    /* Fix: caml_percent_free is uintnat; "%d" mismatched on LP64.
       Use %lu as the init-time messages do. */
    caml_gc_message (0x20, "New space overhead: %lu%%\n", caml_percent_free);
  }

  newpm = norm_pmax (Long_val (Field (v, 4)));
  if (newpm != caml_percent_max){
    caml_percent_max = newpm;
    caml_gc_message (0x20, "New max overhead: %lu%%\n", caml_percent_max);
  }

  /* Stored raw: values > 1000 are words, smaller ones are percentages. */
  newheapincr = Long_val (Field (v, 1));
  if (newheapincr != caml_major_heap_increment){
    caml_major_heap_increment = newheapincr;
    if (newheapincr > 1000){
      caml_gc_message (0x20, "New heap increment size: %luk words\n",
                       caml_major_heap_increment/1024);
    }else{
      caml_gc_message (0x20, "New heap increment size: %lu%%\n",
                       caml_major_heap_increment);
    }
  }

  oldpolicy = caml_allocation_policy;
  caml_set_allocation_policy (Long_val (Field (v, 6)));
  if (oldpolicy != caml_allocation_policy){
    caml_gc_message (0x20, "New allocation policy: %d\n",
                     caml_allocation_policy);
  }

  /* This field was added in 4.03.0. */
  if (Wosize_val (v) >= 8){
    int old_window = caml_major_window;
    caml_set_major_window (norm_window (Long_val (Field (v, 7))));
    if (old_window != caml_major_window){
      caml_gc_message (0x20, "New smoothing window size: %d\n",
                       caml_major_window);
    }
  }

  /* Minor heap size comes last because it will trigger a minor
     collection (thus invalidating [v]) and it can raise [Out_of_memory]. */
  newminwsz = norm_minsize (Long_val (Field (v, 0)));
  if (newminwsz != caml_minor_heap_wsz){
    caml_gc_message (0x20, "New minor heap size: %luk words\n",
                     newminwsz / 1024);
    caml_set_minor_heap_size (Bsize_wsize (newminwsz));
  }
  CAML_INSTR_TIME (tmr, "explicit/gc_set");
  return Val_unit;
}
/* Total size in bytes of [local]'s shared heap: pool words plus
   large-object words. */
uintnat caml_heap_size(struct caml_heap_state* local)
{
  uintnat total_words = local->stats.pool_words + local->stats.large_words;
  return Bsize_wsize(total_words);
}