static void check_block (char *hp) { mlsize_t i; value v = Val_hp (hp); value f; check_head (v); switch (Tag_hp (hp)) { case Abstract_tag: break; case String_tag: break; case Double_tag: Assert (Wosize_val (v) == Double_wosize); break; case Double_array_tag: Assert (Wosize_val (v) % Double_wosize == 0); break; case Custom_tag: Assert (!Is_in_heap (Custom_ops_val (v))); break; case Infix_tag: Assert (0); break; default: Assert (Tag_hp (hp) < No_scan_tag); for (i = 0; i < Wosize_hp (hp); i++) { f = Field (v, i); if (Is_block (f) && Is_in_heap (f)) check_head (f); } } }
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory. */
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by caml_percent_free so the collector gets working
     room, then round up to the heap-chunk granularity (in words). */
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;       /* words of the chunk not yet carved up */
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_whsize (remain) > Max_wosize){
    /* Carve a maximal blue block, chain it through field 0 of the
       previous block, and keep field 1 of the first block pointing at
       the current chain tail. */
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    /* Last real block of the chain; terminate the chain with NULL. */
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    /* No room for another block.  A single leftover word becomes a
       white 0-sized fragment that the sweeper will skip. */
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory. */
static char *expand_heap (mlsize_t request)
{
  char *chunk, *cur, *last;
  asize_t padded_request, chunk_bytes, bytes_left;

  Assert (request <= Max_wosize);
  /* Pad the request by caml_percent_free, then round the byte size up
     to the heap-chunk granularity. */
  padded_request = request + request / 100 * caml_percent_free;
  chunk_bytes = caml_round_heap_chunk_size (Bhsize_wosize (padded_request));
  chunk = caml_alloc_for_heap (chunk_bytes);
  if (chunk == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  bytes_left = chunk_bytes;
  last = cur = chunk;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_bhsize (bytes_left) > Max_wosize){
    /* Carve off a maximal blue block and link it into the chain. */
    Hd_hp (cur) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (cur), 0, Debug_free_major);
#endif
    cur += Bhsize_wosize (Max_wosize);
    bytes_left -= Bhsize_wosize (Max_wosize);
    Field (Op_hp (last), 0) = (value) Op_hp (cur);
    Field (Op_hp (chunk), 1) = (value) Op_hp (cur);
    last = cur;
  }
  if (bytes_left > 1){
    /* Final block of the chain; NULL-terminate the chain. */
    Hd_hp (cur) = Make_header (Wosize_bhsize (bytes_left), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (cur), 0, Debug_free_major);
#endif
    Field (Op_hp (last), 0) = (value) Op_hp (cur);
    Field (Op_hp (chunk), 1) = (value) Op_hp (cur);
    Field (Op_hp (cur), 0) = (value) NULL;
  }else{
    /* Nothing left for a real block; a lone word becomes a white
       zero-sized fragment. */
    Field (Op_hp (last), 0) = (value) NULL;
    if (bytes_left == 1) Hd_hp (cur) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (chunk) >= request);
  if (caml_add_to_heap (chunk) != 0){
    caml_free_for_heap (chunk);
    return NULL;
  }
  return Bp_hp (chunk);
}
/* Deep-copy the OCaml value [orig] into the flat buffer [dest..dest_end),
   relocating all intra-value pointers as if the buffer lived at
   [target_addr].

   Two passes:
     1. Breadth-first walk (queue [q]); each newly seen block is copied
        to the buffer and the mapping old-address -> copy-address is
        recorded in the hash table [t].
     2. Linear sweep over the copied region, rewriting every pointer
        field through [t], shifted by (target_addr - dest).

   Flags: [enable_bigarrays]/[enable_customs]/[enable_atoms] control
   which non-scanned values are accepted (2 = copy payload into the
   buffer as well, for bigarrays); [simulation] only measures space;
   [color] is OR-ed into every copied header.

   Outputs: *start_offset = offset of the root value inside the buffer,
   *bytelen = total bytes used.
   Returns 0 on success, -2 for unsupported values, -4 when the buffer
   is too small; other codes are passed through from the htab/queue
   helpers.

   Fix vs. previous revision: the first-pass lookup call had been
   corrupted by an HTML-entity substitution ("&copy" -> the copyright
   sign), mangling the argument [&copy_addr]; restored. */
int netsys_init_value_1(struct htab *t,
			struct nqueue *q,
			char *dest,
			char *dest_end,
			value orig,
			int enable_bigarrays,
			int enable_customs,
			int enable_atoms,
			int simulation,
			void *target_addr,
			struct named_custom_ops *target_custom_ops,
			int color,
			intnat *start_offset,
			intnat *bytelen)
{
    void *orig_addr;
    void *work_addr;
    value work;
    int   work_tag;
    char *work_header;
    size_t work_bytes;
    size_t work_words;
    void *copy_addr;
    value copy;
    char *copy_header;
    header_t copy_header1;
    int   copy_tag;
    size_t copy_words;
    void *fixup_addr;
    char *dest_cur;
    char *dest_ptr;
    int code, i;
    intnat addr_delta;
    struct named_custom_ops *ops_ptr;
    void *int32_target_ops;
    void *int64_target_ops;
    void *nativeint_target_ops;
    void *bigarray_target_ops;

    copy = 0;
    dest_cur = dest;
    /* Shift to apply to every copied pointer in the second pass. */
    addr_delta = ((char *) target_addr) - dest;

    if (dest_cur >= dest_end && !simulation) return (-4);     /* out of space */

    if (!Is_block(orig)) return (-2);

    orig_addr = (void *) orig;
    code = netsys_queue_add(q, orig_addr);
    if (code != 0) return code;

    /* Resolve the replacement custom-operations tables once, up front. */
    bigarray_target_ops = NULL;
    int32_target_ops = NULL;
    int64_target_ops = NULL;
    nativeint_target_ops = NULL;
    ops_ptr = target_custom_ops;
    while (ops_ptr != NULL) {
	if (strcmp(ops_ptr->name, "_bigarray") == 0)
	    bigarray_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_i") == 0)
	    int32_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_j") == 0)
	    int64_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_n") == 0)
	    nativeint_target_ops = ops_ptr->ops;
	ops_ptr = ops_ptr->next;
    };

    /* First pass: Iterate over the addresses found in q.  Ignore
       addresses already seen in the past (which are in t).  For new
       addresses, make a copy, and add these copies to t. */
    /* fprintf(stderr, "first pass, orig_addr=%lx simulation=%d addr_delta=%lx\n",
       (unsigned long) orig_addr, simulation, addr_delta); */

    code = netsys_queue_take(q, &work_addr);
    while (code != (-3)) {      /* -3 = queue empty */
	if (code != 0) return code;

	/* fprintf(stderr, "work_addr=%lx\n", (unsigned long) work_addr); */

	code = netsys_htab_lookup(t, work_addr, &copy_addr);
	if (code != 0) return code;

	if (copy_addr == NULL) {
	    /* The address is unknown, so copy the value */

	    /* Body of first pass */
	    work = (value) work_addr;
	    work_tag = Tag_val(work);
	    work_header = Hp_val(work);

	    if (work_tag < No_scan_tag) {
		/* It is a scanned value (with subvalues) */

		switch(work_tag) {
		case Object_tag:
		case Closure_tag:
		case Lazy_tag:
		case Forward_tag:
		    return (-2);   /* unsupported */
		}

		work_words = Wosize_hp(work_header);
		if (work_words == 0) {
		    /* Atom (zero-sized block): shared, not copied. */
		    if (!enable_atoms) return (-2);
		    if (enable_atoms == 1) goto next;
		};

		/* Do the copy. */

		work_bytes = Bhsize_hp(work_header);
		copy_header = dest_cur;
		dest_cur += work_bytes;
		if (dest_cur > dest_end && !simulation) return (-4);

		if (simulation)
		    copy_addr = work_addr;
		else {
		    memcpy(copy_header, work_header, work_bytes);
		    copy = Val_hp(copy_header);
		    copy_addr = (void *) copy;
		    /* Recolor the copied header as requested. */
		    Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
		}

		/* Add the association (work_addr -> copy_addr) to t: */

		code = netsys_htab_add(t, work_addr, copy_addr);
		if (code < 0) return code;

		/* Add the sub values of work_addr to q: */

		for (i=0; i < work_words; ++i) {
		    value field = Field(work, i);
		    if (Is_block (field)) {
			code = netsys_queue_add(q, (void *) field);
			if (code != 0) return code;
		    }
		}
	    }
	    else {
		/* It an opaque value */
		int do_copy = 0;
		int do_bigarray = 0;
		void *target_ops = NULL;
		char caml_id = ' ';  /* only b, i, j, n */

		/* Check for bigarrays and other custom blocks */
		switch (work_tag) {
		case Abstract_tag:
		    return(-2);
		case String_tag:
		    do_copy = 1;
		    break;
		case Double_tag:
		    do_copy = 1;
		    break;
		case Double_array_tag:
		    do_copy = 1;
		    break;
		case Custom_tag:
		    {
			struct custom_operations *custom_ops;
			char *id;

			custom_ops = Custom_ops_val(work);
			id = custom_ops->identifier;
			if (id[0] == '_') {
			    switch (id[1]) {
			    case 'b':
				if (!enable_bigarrays) return (-2);
				if (strcmp(id, "_bigarray") == 0) {
				    caml_id = 'b';
				    break;
				}
				/* fallthrough (unknown "_b...") */
			    case 'i': /* int32 */
			    case 'j': /* int64 */
			    case 'n': /* nativeint */
				if (!enable_customs) return (-2);
				if (id[2] == 0) {
				    caml_id = id[1];
				    break;
				}
				/* fallthrough */
			    default:
				return (-2);
			    }
			} else
			    return (-2);
		    }
		};  /* switch */

		switch (caml_id) {  /* look closer at some cases */
		case 'b': {
		    target_ops = bigarray_target_ops;
		    do_copy = 1;
		    do_bigarray = 1;
		    break;
		}
		case 'i':
		    target_ops = int32_target_ops; do_copy = 1; break;
		case 'j':
		    target_ops = int64_target_ops; do_copy = 1; break;
		case 'n':
		    target_ops = nativeint_target_ops; do_copy = 1; break;
		};

		if (do_copy) {
		    /* Copy the value */
		    work_bytes = Bhsize_hp(work_header);
		    copy_header = dest_cur;
		    dest_cur += work_bytes;

		    if (simulation)
			copy_addr = work_addr;
		    else {
			if (dest_cur > dest_end) return (-4);
			memcpy(copy_header, work_header, work_bytes);
			copy = Val_hp(copy_header);
			copy_addr = (void *) copy;
			Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
			/* Retarget the ops table for the destination
			   address space, if one was supplied. */
			if (target_ops != NULL)
			    Custom_ops_val(copy) = target_ops;
		    }

		    code = netsys_htab_add(t, work_addr, copy_addr);
		    if (code < 0) return code;
		}

		if (do_bigarray) {
		    /* postprocessing for copying bigarrays */
		    struct caml_ba_array *b_work, *b_copy;
		    void * data_copy = NULL;     /* NULL-init: only set on the
						    paths that use them */
		    char * data_header = NULL;
		    header_t data_header1;
		    size_t size = 1;
		    size_t size_aligned;
		    size_t size_words;

		    b_work = Bigarray_val(work);
		    b_copy = Bigarray_val(copy);
		    for (i = 0; i < b_work->num_dims; i++) {
			size = size * b_work->dim[i];
		    };
		    size = size *
			caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

		    /* Round the payload up to a whole number of words. */
		    size_aligned = size;
		    if (size%sizeof(void *) != 0)
			size_aligned += sizeof(void *) - (size%sizeof(void *));
		    size_words = Wsize_bsize(size_aligned);

		    /* If we put the copy of the bigarray into our own
		       dest buffer, also generate an abstract header, so
		       it can be skipped when iterating over it.

		       We use here a special representation, so we can
		       encode any length in this header (with a normal
		       Ocaml header we are limited by Max_wosize, e.g.
		       16M on 32 bit systems).  The special
		       representation is an Abstract_tag with zero
		       length, followed by the real length (in words) */

		    if (enable_bigarrays == 2) {
			data_header = dest_cur;
			dest_cur += 2*sizeof(void *);
			data_copy = dest_cur;
			dest_cur += size_aligned;
		    } else if (!simulation) {
			data_header = NULL;
			data_copy = stat_alloc(size_aligned);
		    };

		    if (!simulation) {
			if (dest_cur > dest_end) return (-4);

			/* Initialize header: */

			if (data_header != NULL) {
			    data_header1 = Abstract_tag;
			    memcpy(data_header,
				   (char *) &data_header1,
				   sizeof(header_t));
			    memcpy(data_header + sizeof(header_t),
				   (size_t *) &size_words,
				   sizeof(size_t));
			};

			/* Copy bigarray: */

			memcpy(data_copy, b_work->data, size);
			b_copy->data = data_copy;
			b_copy->proxy = NULL;

			/* If the copy is in our own buffer, it is
			   now externally managed. */
			b_copy->flags =
			    (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
			    (enable_bigarrays == 2 ?
			     CAML_BA_EXTERNAL :
			     CAML_BA_MANAGED);
		    }
		}

	    } /* if (work_tag < No_scan_tag) */
	} /* if (copy_addr == NULL) */

	/* Switch to next address in q: */
    next:
	code = netsys_queue_take(q, &work_addr);
    } /* while */

    /* Second pass. The copied blocks still have fields pointing to the
       original blocks. We fix that now by iterating once over the copied
       memory block. */

    if (!simulation) {
	/* fprintf(stderr, "second pass\n"); */
	dest_ptr = dest;
	while (dest_ptr < dest_cur) {
	    copy_header1 = *((header_t *) dest_ptr);
	    copy_tag = Tag_hd(copy_header1);
	    copy_words = Wosize_hd(copy_header1);
	    copy = (value) (dest_ptr + sizeof(void *));

	    if (copy_tag < No_scan_tag) {
		for (i=0; i < copy_words; ++i) {
		    value field = Field(copy, i);
		    if (Is_block (field)) {
			/* It is a pointer. Try to fix it up. */
			code = netsys_htab_lookup(t, (void *) field,
						  &fixup_addr);
			if (code != 0) return code;

			if (fixup_addr != NULL)
			    Field(copy,i) =
				(value) (((char *) fixup_addr) + addr_delta);
		    }
		}
	    }
	    else if (copy_tag == Abstract_tag && copy_words == 0) {
		/* our special representation for skipping data regions */
		copy_words = ((size_t *) dest_ptr)[1] + 1;
	    };

	    dest_ptr += (copy_words + 1) * sizeof(void *);
	}
    }

    /* hey, fine. Return result */
    *start_offset = sizeof(void *);
    *bytelen = dest_cur - dest;

    /* fprintf(stderr, "return regularly\n");*/

    return 0;
}
/* Walk the entire major heap, gathering statistics (live/free words and
   blocks, fragments, largest free block, chunk count), and return them
   together with the allocation counters as a 13-field OCaml tuple.
   The Asserts double as a heap-consistency check: free-list invariants
   require that blue blocks are never adjacent to other blue blocks or
   to fragments. */
value gc_stat (value v)          /* ML */
{
  value res;
  long live_words = 0, live_blocks = 0,
       free_words = 0, free_blocks = 0, largest_free = 0,
       fragments = 0, heap_chunks = 0;
  char *chunk = heap_start, *chunk_end;
  char *cur_hp, *prev_hp;
  header_t cur_hd;

  Assert (v == Atom (0));
  while (chunk != NULL){
    ++ heap_chunks;
    chunk_end = chunk + Chunk_size (chunk);
    prev_hp = NULL;
    cur_hp = chunk;
    while (cur_hp < chunk_end){
      cur_hd = Hd_hp (cur_hp);
      switch (Color_hd (cur_hd)){
      case White:
        if (Wosize_hd (cur_hd) == 0){
          /* Zero-sized white block = fragment left by the allocator.
             It must not neighbor a blue block or another fragment. */
          ++fragments;
          Assert (prev_hp == NULL
                  || (Color_hp (prev_hp) != Blue
                      && Wosize_hp (prev_hp) > 0));
          Assert (Next (cur_hp) == chunk_end
                  || (Color_hp (Next (cur_hp)) != Blue
                      && Wosize_hp (Next (cur_hp)) > 0));
          break;
        }
        /* FALLTHROUGH */
      case Gray: case Black:
        /* White (nonzero), gray, and black blocks are all live. */
        Assert (Wosize_hd (cur_hd) > 0);
        ++ live_blocks;
        live_words += Whsize_hd (cur_hd);
        break;
      case Blue:
        /* Blue = on the free list; track the largest one. */
        Assert (Wosize_hd (cur_hd) > 0);
        ++ free_blocks;
        free_words += Whsize_hd (cur_hd);
        if (Whsize_hd (cur_hd) > largest_free){
          largest_free = Whsize_hd (cur_hd);
        }
        /* Free blocks must have been coalesced: no blue or fragment
           neighbors. */
        Assert (prev_hp == NULL
                || (Color_hp (prev_hp) != Blue
                    && Wosize_hp (prev_hp) > 0));
        Assert (Next (cur_hp) == chunk_end
                || (Color_hp (Next (cur_hp)) != Blue
                    && Wosize_hp (Next (cur_hp)) > 0));
        break;
      }
      prev_hp = cur_hp;
      cur_hp = Next (cur_hp);
    }
    Assert (cur_hp == chunk_end);
    chunk = Chunk_next (chunk);
  }

  /* Every word of the heap is accounted for exactly once. */
  Assert (live_words + free_words + fragments
          == Wsize_bsize (stat_heap_size));
  /* Order of elements changed for Moscow ML */
  res = alloc (13, 0);
  /* minor_words includes the words allocated in the current minor heap. */
  Field (res, 11) = Val_long (stat_minor_words
                              + Wsize_bsize (young_ptr - young_start));
  Field (res, 12) = Val_long (stat_promoted_words);
  Field (res, 9) = Val_long (stat_major_words + allocated_words);
  Field (res, 10) = Val_long (stat_minor_collections);
  Field (res, 8) = Val_long (stat_major_collections);
  Field (res, 4) = Val_long (Wsize_bsize (stat_heap_size));
  Field (res, 3) = Val_long (heap_chunks);
  Field (res, 7) = Val_long (live_words);
  Field (res, 6) = Val_long (live_blocks);
  Field (res, 2) = Val_long (free_words);
  Field (res, 1) = Val_long (free_blocks);
  Field (res, 5) = Val_long (largest_free);
  Field (res, 0) = Val_long (fragments);
  return res;
}