/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed to by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory.
*/
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
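/* Caller-side sketch (hypothetical helper, not part of the runtime):
   walk the chain built by expand_heap above -- first block returned,
   successors linked through field 0, NULL-terminated -- and hand each
   blue block to the free list, as the comment above requires of the
   caller.  It assumes each block may be passed individually to
   caml_fl_merge_block (the same routine used by caml_make_free_blocks
   below). */
static void insert_expanded_blocks (value *blocks)
{
  value cur = (value) blocks;      /* expand_heap returns Op_hp (mem) */
  while (cur != (value) NULL){
    value next = Field (cur, 0);   /* read the chain link before merging,
                                      since merging may overwrite it */
    caml_fl_merge_block (Bp_val (cur));
    cur = next;
  }
}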
/* Cut a block of memory into Max_wosize pieces, give them headers,
   and optionally merge them into the free list.
   arguments:
   p: pointer to the first word of the block
   size: size of the block (in words)
   do_merge: 1 -> do merge; 0 -> do not merge
   color: which color to give to the pieces; if [do_merge] is 1, this
          is overridden by the merge code, but we have historically used
          [Caml_white].
*/
void caml_make_free_blocks (value *p, mlsize_t size, int do_merge, int color)
{
  mlsize_t sz;

  while (size > 0){
    if (size > Whsize_wosize (Max_wosize)){
      sz = Whsize_wosize (Max_wosize);
    }else{
      sz = size;
    }
    *(header_t *) p = Make_header (Wosize_whsize (sz), 0, color);
    if (do_merge) caml_fl_merge_block (Bp_hp (p));
    size -= sz;
    p += sz;
  }
}
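/* Usage sketch (hypothetical function name, not in the original source):
   carve the unallocated tail of a heap chunk [ch] into free-list blocks,
   mirroring the call made by the compaction code further below.  Assumes
   the Chunk_size / Chunk_alloc chunk-header accessors. */
static void free_chunk_tail (char *ch)
{
  caml_make_free_blocks ((value *) (ch + Chunk_alloc (ch)),
                         Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch)),
                         1,            /* do_merge: insert into free list */
                         Caml_white);  /* color; overridden by merging */
}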
  value res;
  mlsize_t whsize, wosize;
  unsigned long bhsize;
  color_t color;
  header_t hd;

  whsize = getword(chan);
  if (whsize == 0) {
    res = (value) getword(chan);
    if (IS_LONG(res))
      return res;
    else
      return Atom(res >> 2);
  }
  bhsize = Bsize_wsize (whsize);
  wosize = Wosize_whsize (whsize);
#if (SIZEOF_LONG_P == 8)
  if (magic == Little_endian_32_magic_number ||
      magic == Big_endian_32_magic_number) {
    /* Expansion 32 -> 64 required */
    mlsize_t whsize32;
    value32 * block;
    whsize32 = whsize;
    block = (value32 *) stat_alloc(whsize32 * sizeof(value32));
    if (really_getblock(chan, (char *) block,
                        whsize32 * sizeof(value32)) == 0) {
      stat_free((char *) block);
      failwith ("intern : truncated object");
    }
    if (magic == Wrong_endian_32_magic_number)
      rev_pointers_32(block, whsize32);
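    /* The fragment is truncated here; in the full source, [block] is next
       expanded from 32-bit to 64-bit values before the result is built.
       As an illustrative sketch (hypothetical, not the original code),
       widening a non-pointer word is a sign extension, which preserves
       the tag bit of immediate integers:

           for (i = 0; i < whsize32; i++)
             dst[i] = (value) (long) block[i];

       assuming [value32] is a signed 32-bit type.  The real code must
       also rebuild headers and relocate pointer fields, since every
       field grows from 4 to 8 bytes. */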
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;

  Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{
          Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }

  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat) 3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++)
            invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }

    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p, i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p, i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }

  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;
            Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat) 3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;

                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }
              Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{
          Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:
                Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header: the object was
             free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size (ch)
                                              - Chunk_alloc (ch)),
                                 1, Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
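/* For reference: a plausible definition of the encoded-header macros used
   throughout do_compaction_r, modeled on upstream OCaml's compact.c (treat
   the exact bit layout as an assumption here).  The two low bits (the
   "ecolor") let the inversion passes tell words apart:
     0  an ordinary word-aligned pointer, i.e. a link in an inverted list
     1  an infix header
     2  a link in an inverted list hanging off an infix header
     3  an encoded non-infix header (the terminator of an inverted list) */
#define Make_ehd(s,t,c)  (((s) << 10) | (t) << 2 | (c))
#define Whsize_ehd(h)    Whsize_hd (h)
#define Wosize_ehd(h)    Wosize_hd (h)
#define Tag_ehd(h)       (((h) >> 2) & 0xFF)
#define Ecolor(w)        ((w) & 3)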