/* Compact the heap, then recompact into a single fresh chunk when the
   compacted heap is still much larger than the live data requires. */
void caml_compact_heap (void)
{
  uintnat target_size, live;

  do_compaction ();
  /* Compaction may fail to shrink the heap to a reasonable size because it
     deals in complete chunks: if a very large chunk is at the beginning of
     the heap, everything gets moved to it and it is not freed.

     In that case, we allocate a new chunk of the desired heap size, chain
     it at the beginning of the heap (thus pretending its address is
     smaller), and launch a second compaction.  This will move all data to
     this new chunk and free the very large chunk.

     See PR#5389
  */
  /* We compute:
       freewords   = caml_fl_cur_size   (exact)
       heapsize    = caml_heap_size     (exact)
       live        = heap_size - freewords
       target_size = live * (1 + caml_percent_free / 100)
                   = live / 100 * (100 + caml_percent_free)
     We add 1 to live/100 to make sure it isn't 0.
     We recompact if target_size < heap_size / 2
  */
  live = caml_stat_heap_size - Bsize_wsize (caml_fl_cur_size);
  target_size = (live / 100 + 1) * (100 + caml_percent_free);
  target_size = caml_round_heap_chunk_size (target_size);
  if (target_size < caml_stat_heap_size / 2) {
    char *chunk;

    /* round it up to a page size */
    chunk = caml_alloc_for_heap (target_size);
    if (chunk == NULL) return;   /* out of memory: keep the current heap */
    /* NOTE(review): a later variant of this function passes an explicit
       Caml_blue color to caml_make_free_blocks (PR#5757) so that the
       recompaction recognizes these blocks as free -- confirm that this
       caml_make_free_blocks makes the blocks blue. */
    caml_make_free_blocks ((value *) chunk, Wsize_bsize (Chunk_size (chunk)), 0);
    if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0) {
      /* Could not register the chunk in the page table: abandon the
         recompaction and release the chunk. */
      caml_free_for_heap (chunk);
      return;
    }
    /* Chain the fresh chunk at the head of the chunk list so the second
       compaction moves all live data into it. */
    Chunk_next (chunk) = caml_heap_start;
    caml_heap_start = chunk;
    ++ caml_stat_heap_chunks;
    caml_stat_heap_size += Chunk_size (chunk);
    if (caml_stat_heap_size > caml_stat_top_heap_size) {
      caml_stat_top_heap_size = caml_stat_heap_size;
    }
    do_compaction ();
    /* After the second compaction only the fresh chunk should remain. */
    Assert (caml_stat_heap_chunks == 1);
    Assert (Chunk_next (caml_heap_start) == NULL);
  }
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory. */
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by caml_percent_free to leave slack in the free list,
     then round up to the heap-chunk granularity (sizes here are in words,
     header included). */
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  /* Carve the chunk into blue blocks of at most Max_wosize words each,
     linking consecutive blocks through field 0 and keeping field 1 of the
     first block pointing at the last block of the chain. */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    /* Room for a header plus at least one word: make the remainder the
       final blue block and terminate the chain. */
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    /* Terminate the chain at the previous block; a single leftover word
       becomes a zero-sized white fragment. */
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    /* Could not register the chunk with the heap: release it. */
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory. */
static char *expand_heap (mlsize_t request)
{
  char *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by caml_percent_free to leave slack in the free list,
     then round up to the heap-chunk granularity (sizes below are in
     bytes, header included). */
  over_request = request + request / 100 * caml_percent_free;
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (over_request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  /* Carve the chunk into blue blocks of at most Max_wosize words each,
     linking consecutive blocks through field 0 and keeping field 1 of the
     first block pointing at the last block of the chain. */
  while (Wosize_bhsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    hp += Bhsize_wosize (Max_wosize);
    remain -= Bhsize_wosize (Max_wosize);
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    /* Make the remainder the final blue block and terminate the chain. */
    Hd_hp (hp) = Make_header (Wosize_bhsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    Field (Op_hp (hp), 0) = (value) NULL;
  }else{
    /* Terminate the chain at the previous block; a remainder of exactly
       one gets a zero-sized white fragment header. */
    Field (Op_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap (mem) != 0){
    /* Could not register the chunk with the heap: release it. */
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size (in words).
   The caller must insert the block into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory. */
static char *expand_heap (mlsize_t request)
{
  char *mem;
  asize_t malloc_request;

  /* Round the requested size (plus header, in bytes) up to the heap-chunk
     granularity. */
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  Assert (Wosize_bhsize (malloc_request) >= request);
  /* The whole rounded chunk is described by a single blue header.
     NOTE(review): if caml_round_heap_chunk_size can return more than
     Bhsize_wosize (Max_wosize) bytes, the size field written here would
     exceed Max_wosize and the header would be invalid -- confirm the
     chunk-size bound.  Later versions of this function split the chunk
     into several blocks of at most Max_wosize words for this reason. */
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Caml_blue);
  if (caml_add_to_heap (mem) != 0){
    /* Could not register the chunk with the heap: release it. */
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
static void ocamlpool_chunk_alloc(void) { ocamlpool_assert(ocamlpool_next_chunk_size > 1); void *block = caml_alloc_for_heap(ocamlpool_next_chunk_size * WORD_SIZE); abort_unless(block != NULL); size_t chunk_size = Chunk_size(block); ocamlpool_assert(IS_WORD_ALIGNED(chunk_size)); ocamlpool_color = caml_allocation_color(block); ocamlpool_allocated_chunks_counter += 1; size_t words = (chunk_size / WORD_SIZE); ocamlpool_root = (value)((value*)block + 1); /* Make it look like a well-formed string */ OCAMLPOOL_SET_HEADER(ocamlpool_root, words - 1, String_tag, ocamlpool_color); caml_add_to_heap(block); caml_allocated_words += words; }
/* Copy the OCaml value [orig] according to [cflags] decoded from [flags]:
   first a counting (simulate) pass computes the byte length of the copy,
   then storage is allocated for it.
   NOTE(review): this definition continues beyond the visible chunk -- it
   is truncated at the start of the small-value (else) branch below. */
value netsys_copy_value(value flags, value orig)
{
  int code;
  int cflags;
  intnat start_offset, bytelen;
  mlsize_t wosize;
  char *dest, *dest_end, *extra_block, *extra_block_end;
  int color;
  struct named_custom_ops bigarray_ops;
  struct named_custom_ops int32_ops;
  struct named_custom_ops int64_ops;
  struct named_custom_ops nativeint_ops;
  CAMLparam2(orig,flags);
  CAMLlocal1(block);

  /* First test on trivial cases: immediates and zero-sized blocks are
     returned as-is. */
  if (Is_long(orig) || Wosize_val(orig) == 0) { CAMLreturn(orig); };

  code = prep_stat_tab();
  if (code != 0) goto exit;
  code = prep_stat_queue();
  if (code != 0) goto exit;
  cflags = caml_convert_flag_list(flags, init_value_flags);

  /* fprintf (stderr, "counting\n"); */
  /* Count only!  Simulate the copy to learn [start_offset] and [bytelen]. */
  code = netsys_init_value_1(stat_tab, stat_queue, NULL, NULL, orig,
                             (cflags & 1) ? 1 : 0, /* enable_bigarrays */
                             (cflags & 2) ? 1 : 0, /* enable_customs */
                             1, /* enable_atoms */
                             1, /* simulate */
                             NULL, NULL, 0,
                             &start_offset, &bytelen);
  if (code != 0) goto exit;
  /* fprintf (stderr, "done counting bytelen=%ld\n", bytelen); */

  /* set up the custom ops.  We always set this, because we assume that
     the values in [orig] are not trustworthy.  Each ops pointer is taken
     from a freshly allocated dummy value of the matching kind. */
  bigarray_ops.name = "_bigarray";
  bigarray_ops.ops =
    Custom_ops_val(alloc_bigarray_dims(CAML_BA_UINT8 | BIGARRAY_C_LAYOUT,
                                       1, NULL, 1));
  bigarray_ops.next = &int32_ops;
  int32_ops.name = "_i";
  int32_ops.ops = Custom_ops_val(caml_copy_int32(0));
  int32_ops.next = &int64_ops;
  int64_ops.name = "_j";
  int64_ops.ops = Custom_ops_val(caml_copy_int64(0));
  int64_ops.next = &nativeint_ops;
  nativeint_ops.name = "_n";
  nativeint_ops.ops = Custom_ops_val(caml_copy_nativeint(0));
  nativeint_ops.next = NULL;

  /* alloc */
  extra_block = NULL;
  extra_block_end = NULL;
  /* shamelessly copied from intern.c */
  wosize = Wosize_bhsize(bytelen);
  /* fprintf (stderr, "wosize=%ld\n", wosize); */
  if (wosize > Max_wosize) {
    /* Oversized result: allocate a dedicated heap chunk instead of a
       regular block. */
    /* Round desired size up to next page */
    asize_t request = ((bytelen + Page_size - 1) >> Page_log) << Page_log;
    extra_block = caml_alloc_for_heap(request);
    if (extra_block == NULL) caml_raise_out_of_memory();
    extra_block_end = extra_block + request;
    color = caml_allocation_color(extra_block);
    dest = extra_block;
    dest_end = dest + bytelen;
    block = Val_hp(extra_block);
  } else {
/* Context-passing (reentrant) variant of caml_compact_heap: compact the
   heap, then recompact into a single fresh chunk when the compacted heap
   is still much larger than the live data requires. */
void caml_compact_heap_r (CAML_R)
{
  uintnat target_words, target_size, live;

  do_compaction_r (ctx);
  /* Compaction may fail to shrink the heap to a reasonable size because it
     deals in complete chunks: if a very large chunk is at the beginning of
     the heap, everything gets moved to it and it is not freed.

     In that case, we allocate a new chunk of the desired heap size, chain
     it at the beginning of the heap (thus pretending its address is
     smaller), and launch a second compaction.  This will move all data to
     this new chunk and free the very large chunk.

     See PR#5389
  */
  /* We compute:
       freewords    = caml_fl_cur_size                  (exact)
       heapwords    = Wsize_bsize (caml_heap_size)      (exact)
       live         = heapwords - freewords
       wanted       = caml_percent_free * (live / 100 + 1)
                      (same as in do_compaction)
       target_words = live + wanted
     We add one page to make sure a small difference in counting sizes
     won't make [do_compaction] keep the second block (and break all sorts
     of invariants).
     We recompact if target_size < heap_size / 2
  */
  live = Wsize_bsize (caml_stat_heap_size) - caml_fl_cur_size;
  target_words = live + caml_percent_free * (live / 100 + 1)
                 + Wsize_bsize (Page_size);
  target_size = caml_round_heap_chunk_size_r (ctx, Bsize_wsize (target_words));
  if (target_size < caml_stat_heap_size / 2){
    char *chunk;

    caml_gc_message (0x10, "Recompacting heap (target=%luk)\n",
                     target_size / 1024);
    chunk = caml_alloc_for_heap (target_size);
    if (chunk == NULL) return;   /* out of memory: keep the current heap */
    /* PR#5757: we need to make the new blocks blue, or they won't be
       recognized as free by the recompaction. */
    caml_make_free_blocks_r (ctx, (value *) chunk,
                             Wsize_bsize (Chunk_size (chunk)), 0, Caml_blue);
    if (caml_page_table_add_r (ctx, In_heap, chunk,
                               chunk + Chunk_size (chunk)) != 0){
      /* Could not register the chunk in the page table: abandon the
         recompaction and release the chunk. */
      caml_free_for_heap (chunk);
      return;
    }
    /* Chain the fresh chunk at the head of the chunk list so the second
       compaction moves all live data into it. */
    Chunk_next (chunk) = caml_heap_start;
    caml_heap_start = chunk;
    ++ caml_stat_heap_chunks;
    caml_stat_heap_size += Chunk_size (chunk);
    if (caml_stat_heap_size > caml_stat_top_heap_size){
      caml_stat_top_heap_size = caml_stat_heap_size;
    }
    do_compaction_r (ctx);
    /* After the second compaction only the fresh chunk should remain. */
    Assert (caml_stat_heap_chunks == 1);
    Assert (Chunk_next (caml_heap_start) == NULL);
    Assert (caml_stat_heap_size == Chunk_size (chunk));
  }
}