/* [size] is a number of bytes (payload only; one extra word is added
   below for the custom_operations pointer).
   Allocates a Custom_tag block for [ops] with room for [size] bytes.
   [mem]/[max] feed caml_adjust_gc_speed on the shared-heap path, so
   out-of-heap resource usage can accelerate the major GC.
   Small blocks go to the minor heap; if they carry a finalizer they are
   recorded in caml_finalize_table so the minor GC can run it —
   presumably consumed when the minor heap is emptied; TODO confirm
   against this runtime's minor-GC code.
   NOTE(review): the wosize computation can overflow for [size] near
   UINTNAT_MAX, and wosize is not checked against Max_wosize — assumes
   trusted callers. */
CAMLexport value caml_alloc_custom(struct custom_operations * ops,
                                   uintnat size,
                                   mlsize_t mem,
                                   mlsize_t max)
{
  mlsize_t wosize;
  value result;

  /* 1 word for the ops pointer + ceil(size / sizeof(value)) data words */
  wosize = 1 + (size + sizeof(value) - 1) / sizeof(value);
  if (wosize <= Max_young_wosize) {
    result = caml_alloc_small(wosize, Custom_tag);
    /* Install the ops pointer immediately: the GC inspects it. */
    Custom_ops_val(result) = ops;
    if (ops->finalize != NULL) {
      /* Remembered that the block has a finalizer */
      if (caml_finalize_table.ptr >= caml_finalize_table.limit){
        CAMLassert (caml_finalize_table.ptr == caml_finalize_table.limit);
        caml_realloc_ref_table (&caml_finalize_table);
      }
      *caml_finalize_table.ptr++ = (value *)result;
    }
  } else {
    /* Too big for the minor heap: allocate directly in the major heap. */
    result = caml_alloc_shr(wosize, Custom_tag);
    Custom_ops_val(result) = ops;
    caml_adjust_gc_speed(mem, max);
    /* May trigger a GC slice and move/return an updated root. */
    result = caml_check_urgent_gc(result);
  }
  return result;
}
static void check_block (char *hp) { mlsize_t i; value v = Val_hp (hp); value f; check_head (v); switch (Tag_hp (hp)) { case Abstract_tag: break; case String_tag: break; case Double_tag: Assert (Wosize_val (v) == Double_wosize); break; case Double_array_tag: Assert (Wosize_val (v) % Double_wosize == 0); break; case Custom_tag: Assert (!Is_in_heap (Custom_ops_val (v))); break; case Infix_tag: Assert (0); break; default: Assert (Tag_hp (hp) < No_scan_tag); for (i = 0; i < Wosize_hp (hp); i++) { f = Field (v, i); if (Is_block (f) && Is_in_heap (f)) check_head (f); } } }
/* [size] is a number of bytes (payload only; one extra word is added
   for the custom_operations pointer).
   Allocates a Custom_tag block for [ops].  Blocks that carry a
   finalizer, or that are too large for the minor heap, are allocated
   directly in the shared (major) heap; [mem]/[max] then feed
   caml_adjust_gc_speed. */
CAMLexport value caml_alloc_custom(struct custom_operations * ops,
                                   uintnat size,
                                   mlsize_t mem,
                                   mlsize_t max)
{
  value block;
  /* 1 word for the ops pointer + ceil(size / sizeof(value)) data words */
  mlsize_t wosize = 1 + (size + sizeof(value) - 1) / sizeof(value);

  if (ops->finalize != NULL || wosize > Max_young_wosize) {
    /* Major-heap path: finalized or oversized custom blocks. */
    block = caml_alloc_shr(wosize, Custom_tag);
    Custom_ops_val(block) = ops;
    caml_adjust_gc_speed(mem, max);
    /* May run a GC slice; returns the (possibly updated) root. */
    block = caml_check_urgent_gc(block);
  } else {
    /* Small, finalizer-free block: fast minor-heap allocation. */
    block = caml_alloc_small(wosize, Custom_tag);
    Custom_ops_val(block) = ops;
  }
  return block;
}
/* Recursively fold the structural hash of [obj] into the global hash
   accumulator (via the Combine / Combine_small macros).  Traversal is
   bounded by two globals: hash_univ_count ("meaningful" nodes hashed)
   and hash_univ_limit (total nodes visited); once either goes negative
   the walk stops early.  The exact byte-level hashing order below is
   part of the observable hash value — do not reorder. */
static void hash_aux(value obj)
{
  unsigned char * p;
  mlsize_t i, j;
  tag_t tag;

  hash_univ_limit--;
  if (hash_univ_count < 0 || hash_univ_limit < 0) return;

 again:
  if (Is_long(obj)) {
    hash_univ_count--;
    Combine(Long_val(obj));
    return;
  }

  /* Pointers into the heap are well-structured blocks. So are atoms.
     We can inspect the block contents. */

  Assert (Is_block (obj));
  if (Is_in_value_area(obj)) {
    tag = Tag_val(obj);
    switch (tag) {
    case String_tag:
      /* Hash every byte of the string, in order. */
      hash_univ_count--;
      i = caml_string_length(obj);
      for (p = &Byte_u(obj, 0); i > 0; i--, p++)
        Combine_small(*p);
      break;
    case Double_tag:
      /* For doubles, we inspect their binary representation, LSB first.
         The results are consistent among all platforms with IEEE floats. */
      hash_univ_count--;
#ifdef ARCH_BIG_ENDIAN
      for (p = &Byte_u(obj, sizeof(double) - 1), i = sizeof(double);
           i > 0;
           p--, i--)
#else
      for (p = &Byte_u(obj, 0), i = sizeof(double);
           i > 0;
           p++, i--)
#endif
        Combine_small(*p);
      break;
    case Double_array_tag:
      /* Same LSB-first byte walk, repeated for each unboxed double. */
      hash_univ_count--;
      for (j = 0; j < Bosize_val(obj); j += sizeof(double)) {
#ifdef ARCH_BIG_ENDIAN
        for (p = &Byte_u(obj, j + sizeof(double) - 1), i = sizeof(double);
             i > 0;
             p--, i--)
#else
        for (p = &Byte_u(obj, j), i = sizeof(double);
             i > 0;
             p++, i--)
#endif
          Combine_small(*p);
      }
      break;
    case Abstract_tag:
      /* We don't know anything about the contents of the block.
         Better do nothing. */
      break;
    case Infix_tag:
      /* Hash the enclosing closure block instead. */
      hash_aux(obj - Infix_offset_val(obj));
      break;
    case Forward_tag:
      /* Follow the forwarding pointer; counts against hash_univ_limit
         only (the decrement at function entry). */
      obj = Forward_val (obj);
      goto again;
    case Object_tag:
      /* Objects hash by identity: their unique id, not their fields. */
      hash_univ_count--;
      Combine(Oid_val(obj));
      break;
    case Custom_tag:
      /* If no hashing function provided, do nothing */
      if (Custom_ops_val(obj)->hash != NULL) {
        hash_univ_count--;
        Combine(Custom_ops_val(obj)->hash(obj));
      }
      break;
    default:
      /* Ordinary structured block: mix the tag, then the fields in
         DESCENDING index order (part of the hash definition). */
      hash_univ_count--;
      Combine_small(tag);
      i = Wosize_val(obj);
      while (i != 0) {
        i--;
        hash_aux(Field(obj, i));
      }
      break;
    }
    return;
  }

  /* Otherwise, obj is a pointer outside the heap, to an object with
     a priori unknown structure. Use its physical address as hash key. */
  Combine((intnat) obj);
}
/* Copy the OCaml value graph rooted at [orig] into the flat buffer
   [dest, dest_end).  Two passes over a worklist:

     1. breadth-first copy of every reachable block (queue [q] holds
        pending addresses, hash table [t] maps original -> copy and
        provides sharing/cycle detection);
     2. a linear sweep over the copied region that rewrites intra-graph
        pointers, relocating them by [addr_delta] so the buffer is valid
        once mapped at [target_addr].

   When [simulation] is non-zero nothing is written; the function only
   measures the space required (dest may then be NULL).

   enable_bigarrays / enable_customs / enable_atoms gate which opaque
   values are accepted (0 = reject, 1 = accept/skip, 2 for bigarrays =
   also embed the data in the destination buffer).  [color] is OR-ed
   into every copied header.  [target_custom_ops] supplies replacement
   custom_operations pointers valid in the target address space.

   On success returns 0 and sets *start_offset (offset of the root
   value inside the buffer) and *bytelen (bytes used).  Failure codes:
   -2 unsupported value, -4 out of space; other negative codes are
   propagated from the queue/htab helpers (-3 from netsys_queue_take
   means "empty" and terminates the first pass).

   Fix applied in review: `&copy_addr` in the htab lookup had been
   corrupted into the mojibake `©_addr` (an HTML `&copy;` entity),
   which does not compile. */
int netsys_init_value_1(struct htab *t,
                        struct nqueue *q,
                        char *dest,
                        char *dest_end,
                        value orig,
                        int enable_bigarrays,
                        int enable_customs,
                        int enable_atoms,
                        int simulation,
                        void *target_addr,
                        struct named_custom_ops *target_custom_ops,
                        int color,
                        intnat *start_offset,
                        intnat *bytelen)
{
  void *orig_addr;
  void *work_addr;
  value work;
  int work_tag;
  char *work_header;
  size_t work_bytes;
  size_t work_words;
  void *copy_addr;
  value copy;
  char *copy_header;
  header_t copy_header1;
  int copy_tag;
  size_t copy_words;
  void *fixup_addr;
  char *dest_cur;
  char *dest_ptr;
  int code, i;
  intnat addr_delta;
  struct named_custom_ops *ops_ptr;
  void *int32_target_ops;
  void *int64_target_ops;
  void *nativeint_target_ops;
  void *bigarray_target_ops;

  copy = 0;
  dest_cur = dest;
  /* Relocation distance between our buffer and its final address. */
  addr_delta = ((char *) target_addr) - dest;

  if (dest_cur >= dest_end && !simulation) return (-4);   /* out of space */
  if (!Is_block(orig)) return (-2);   /* immediates cannot be copied */

  orig_addr = (void *) orig;
  code = netsys_queue_add(q, orig_addr);
  if (code != 0) return code;

  /* Resolve the target-address-space custom ops by identifier. */
  bigarray_target_ops = NULL;
  int32_target_ops = NULL;
  int64_target_ops = NULL;
  nativeint_target_ops = NULL;
  ops_ptr = target_custom_ops;
  while (ops_ptr != NULL) {
    if (strcmp(ops_ptr->name, "_bigarray") == 0)
      bigarray_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_i") == 0)
      int32_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_j") == 0)
      int64_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_n") == 0)
      nativeint_target_ops = ops_ptr->ops;
    ops_ptr = ops_ptr->next;
  };

  /* First pass: drain [q].  Addresses already in [t] were copied
     before (sharing / cycles); new addresses are copied and added. */
  code = netsys_queue_take(q, &work_addr);
  while (code != (-3)) {          /* -3 = queue empty */
    if (code != 0) return code;

    code = netsys_htab_lookup(t, work_addr, &copy_addr);
    if (code != 0) return code;

    if (copy_addr == NULL) {
      /* Unknown address: copy this block. */
      work = (value) work_addr;
      work_tag = Tag_val(work);
      work_header = Hp_val(work);

      if (work_tag < No_scan_tag) {
        /* A scanned value (with subvalues). */
        switch (work_tag) {
        case Object_tag:
        case Closure_tag:
        case Lazy_tag:
        case Forward_tag:
          return (-2);            /* unsupported */
        }

        work_words = Wosize_hp(work_header);
        if (work_words == 0) {
          /* Atoms live in static runtime memory and may be shared. */
          if (!enable_atoms) return (-2);
          if (enable_atoms == 1) goto next;
        };

        work_bytes = Bhsize_hp(work_header);
        copy_header = dest_cur;
        dest_cur += work_bytes;
        if (dest_cur > dest_end && !simulation) return (-4);

        if (simulation)
          copy_addr = work_addr;  /* counting only */
        else {
          memcpy(copy_header, work_header, work_bytes);
          copy = Val_hp(copy_header);
          copy_addr = (void *) copy;
          Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
        }

        /* Record work_addr -> copy_addr for sharing detection. */
        code = netsys_htab_add(t, work_addr, copy_addr);
        if (code < 0) return code;

        /* Enqueue the block's pointer fields. */
        for (i=0; i < work_words; ++i) {
          value field = Field(work, i);
          if (Is_block (field)) {
            code = netsys_queue_add(q, (void *) field);
            if (code != 0) return code;
          }
        }
      }
      else {
        /* An opaque value. */
        int do_copy = 0;
        int do_bigarray = 0;
        void *target_ops = NULL;
        char caml_id = ' ';       /* only b, i, j, n */

        switch (work_tag) {
        case Abstract_tag:
          return (-2);
        case String_tag:
        case Double_tag:
        case Double_array_tag:
          do_copy = 1;
          break;
        case Custom_tag: {
          struct custom_operations *custom_ops;
          char *id;

          custom_ops = Custom_ops_val(work);
          id = custom_ops->identifier;
          if (id[0] == '_') {
            switch (id[1]) {
            case 'b':
              if (!enable_bigarrays) return (-2);
              if (strcmp(id, "_bigarray") == 0) {
                caml_id = 'b';
                break;
              }
              /* fallthrough — NOTE(review): a "_b..." id that is not
                 "_bigarray" falls into the short-id check below;
                 preserved as-is, verify intent. */
            case 'i':             /* int32 */
            case 'j':             /* int64 */
            case 'n':             /* nativeint */
              if (!enable_customs) return (-2);
              if (id[2] == 0) {
                caml_id = id[1];
                break;
              }
              /* fallthrough */
            default:
              return (-2);
            }
          }
          else
            return (-2);
        }
        };  /* switch */

        switch (caml_id) {        /* look closer at some cases */
        case 'b':
          target_ops = bigarray_target_ops;
          do_copy = 1;
          do_bigarray = 1;
          break;
        case 'i':
          target_ops = int32_target_ops; do_copy = 1; break;
        case 'j':
          target_ops = int64_target_ops; do_copy = 1; break;
        case 'n':
          target_ops = nativeint_target_ops; do_copy = 1; break;
        };

        if (do_copy) {
          /* Copy the (header + opaque payload) verbatim, then patch
             the ops pointer for the target address space. */
          work_bytes = Bhsize_hp(work_header);
          copy_header = dest_cur;
          dest_cur += work_bytes;

          if (simulation)
            copy_addr = work_addr;
          else {
            if (dest_cur > dest_end) return (-4);
            memcpy(copy_header, work_header, work_bytes);
            copy = Val_hp(copy_header);
            copy_addr = (void *) copy;
            Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
            if (target_ops != NULL)
              Custom_ops_val(copy) = target_ops;
          }

          code = netsys_htab_add(t, work_addr, copy_addr);
          if (code < 0) return code;
        }

        if (do_bigarray) {
          /* Postprocessing: also copy the bigarray's out-of-heap data. */
          struct caml_ba_array *b_work, *b_copy;
          void * data_copy = NULL;
          char * data_header = NULL;  /* NULL-init: only set on some paths */
          header_t data_header1;
          size_t size = 1;
          size_t size_aligned;
          size_t size_words;

          b_work = Bigarray_val(work);
          b_copy = Bigarray_val(copy);
          for (i = 0; i < b_work->num_dims; i++) {
            size = size * b_work->dim[i];
          };
          size = size * caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

          size_aligned = size;
          if (size%sizeof(void *) != 0)
            size_aligned += sizeof(void *) - (size%sizeof(void *));
          size_words = Wsize_bsize(size_aligned);

          /* If we put the copy of the bigarray into our own dest buffer,
             also generate an abstract header, so it can be skipped when
             iterating over it.  Special representation: an Abstract_tag
             header of zero length followed by the real length in words
             (a normal header would be limited by Max_wosize). */
          if (enable_bigarrays == 2) {
            data_header = dest_cur;
            dest_cur += 2*sizeof(void *);
            data_copy = dest_cur;
            dest_cur += size_aligned;
          }
          else if (!simulation) {
            data_header = NULL;
            data_copy = stat_alloc(size_aligned);
          };

          if (!simulation) {
            if (dest_cur > dest_end) return (-4);

            if (data_header != NULL) {
              data_header1 = Abstract_tag;
              memcpy(data_header, (char *) &data_header1, sizeof(header_t));
              memcpy(data_header + sizeof(header_t),
                     (size_t *) &size_words,
                     sizeof(size_t));
            };

            memcpy(data_copy, b_work->data, size);
            b_copy->data = data_copy;
            b_copy->proxy = NULL;
            /* If the copy is in our own buffer it is now externally
               managed; otherwise stat_alloc'ed and managed. */
            b_copy->flags =
              (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
              (enable_bigarrays == 2 ? CAML_BA_EXTERNAL : CAML_BA_MANAGED);
          }
        }
      }   /* if (work_tag < No_scan_tag) */
    }     /* if (copy_addr == NULL) */

  next:
    code = netsys_queue_take(q, &work_addr);
  }       /* while */

  /* Second pass: the copied blocks still point at the originals.
     Walk the copied region once and relocate every known pointer. */
  if (!simulation) {
    dest_ptr = dest;
    while (dest_ptr < dest_cur) {
      copy_header1 = *((header_t *) dest_ptr);
      copy_tag = Tag_hd(copy_header1);
      copy_words = Wosize_hd(copy_header1);
      copy = (value) (dest_ptr + sizeof(void *));

      if (copy_tag < No_scan_tag) {
        for (i=0; i < copy_words; ++i) {
          value field = Field(copy, i);
          if (Is_block (field)) {
            /* A pointer: rewrite it if it targets a copied block. */
            code = netsys_htab_lookup(t, (void *) field, &fixup_addr);
            if (code != 0) return code;
            if (fixup_addr != NULL)
              Field(copy,i) = (value) (((char *) fixup_addr) + addr_delta);
          }
        }
      }
      else if (copy_tag == Abstract_tag && copy_words == 0) {
        /* Our special representation for skipping data regions. */
        copy_words = ((size_t *) dest_ptr)[1] + 1;
      };

      dest_ptr += (copy_words + 1) * sizeof(void *);
    }
  }

  *start_offset = sizeof(void *);   /* root follows the first header */
  *bytelen = dest_cur - dest;

  return 0;
}
value netsys_copy_value(value flags, value orig) { int code; int cflags; intnat start_offset, bytelen; mlsize_t wosize; char *dest, *dest_end, *extra_block, *extra_block_end; int color; struct named_custom_ops bigarray_ops; struct named_custom_ops int32_ops; struct named_custom_ops int64_ops; struct named_custom_ops nativeint_ops; CAMLparam2(orig,flags); CAMLlocal1(block); /* First test on trivial cases: */ if (Is_long(orig) || Wosize_val(orig) == 0) { CAMLreturn(orig); }; code = prep_stat_tab(); if (code != 0) goto exit; code = prep_stat_queue(); if (code != 0) goto exit; cflags = caml_convert_flag_list(flags, init_value_flags); /* fprintf (stderr, "counting\n"); */ /* Count only! */ code = netsys_init_value_1(stat_tab, stat_queue, NULL, NULL, orig, (cflags & 1) ? 1 : 0, /* enable_bigarrays */ (cflags & 2) ? 1 : 0, /* enable_customs */ 1, /* enable_atoms */ 1, /* simulate */ NULL, NULL, 0, &start_offset, &bytelen); if (code != 0) goto exit; /* fprintf (stderr, "done counting bytelen=%ld\n", bytelen); */ /* set up the custom ops. 
We always set this, because we assume that the values in [orig] are not trustworthy */ bigarray_ops.name = "_bigarray"; bigarray_ops.ops = Custom_ops_val(alloc_bigarray_dims(CAML_BA_UINT8 | BIGARRAY_C_LAYOUT, 1, NULL, 1)); bigarray_ops.next = &int32_ops; int32_ops.name = "_i"; int32_ops.ops = Custom_ops_val(caml_copy_int32(0)); int32_ops.next = &int64_ops; int64_ops.name = "_j"; int64_ops.ops = Custom_ops_val(caml_copy_int64(0)); int64_ops.next = &nativeint_ops; nativeint_ops.name = "_n"; nativeint_ops.ops = Custom_ops_val(caml_copy_nativeint(0)); nativeint_ops.next = NULL; /* alloc */ extra_block = NULL; extra_block_end = NULL; /* shamelessly copied from intern.c */ wosize = Wosize_bhsize(bytelen); /* fprintf (stderr, "wosize=%ld\n", wosize); */ if (wosize > Max_wosize) { /* Round desired size up to next page */ asize_t request = ((bytelen + Page_size - 1) >> Page_log) << Page_log; extra_block = caml_alloc_for_heap(request); if (extra_block == NULL) caml_raise_out_of_memory(); extra_block_end = extra_block + request; color = caml_allocation_color(extra_block); dest = extra_block; dest_end = dest + bytelen; block = Val_hp(extra_block); } else {
static void extern_rec(value v) { tailcall: if (Is_long(v)) { intnat n = Long_val(v); if (n >= 0 && n < 0x40) { Write(PREFIX_SMALL_INT + n); } else if (n >= -(1 << 7) && n < (1 << 7)) { writecode8(CODE_INT8, n); } else if (n >= -(1 << 15) && n < (1 << 15)) { writecode16(CODE_INT16, n); #ifdef ARCH_SIXTYFOUR } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) { writecode64(CODE_INT64, n); #endif } else writecode32(CODE_INT32, n); return; } if (Is_in_value_area(v)) { header_t hd = Hd_val(v); tag_t tag = Tag_hd(hd); mlsize_t sz = Wosize_hd(hd); if (tag == Forward_tag) { value f = Forward_val (v); if (Is_block (f) && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){ /* Do not short-circuit the pointer. */ }else{ v = f; goto tailcall; } } /* Atoms are treated specially for two reasons: they are not allocated in the externed block, and they are automatically shared. */ if (sz == 0) { if (tag < 16) { Write(PREFIX_SMALL_BLOCK + tag); } else { writecode32(CODE_BLOCK32, hd); } return; } /* Check if already seen */ if (Color_hd(hd) == Caml_blue) { uintnat d = obj_counter - (uintnat) Field(v, 0); if (d < 0x100) { writecode8(CODE_SHARED8, d); } else if (d < 0x10000) { writecode16(CODE_SHARED16, d); } else { writecode32(CODE_SHARED32, d); } return; } /* Output the contents of the object */ switch(tag) { case String_tag: { mlsize_t len = caml_string_length(v); if (len < 0x20) { Write(PREFIX_SMALL_STRING + len); } else if (len < 0x100) { writecode8(CODE_STRING8, len); } else { writecode32(CODE_STRING32, len); } writeblock(String_val(v), len); size_32 += 1 + (len + 4) / 4; size_64 += 1 + (len + 8) / 8; extern_record_location(v); break; } case Double_tag: { if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); Write(CODE_DOUBLE_NATIVE); writeblock_float8((double *) v, 1); size_32 += 1 + 2; size_64 += 1 + 1; extern_record_location(v); break; } case Double_array_tag: { mlsize_t 
nfloats; if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); nfloats = Wosize_val(v) / Double_wosize; if (nfloats < 0x100) { writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats); } else { writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats); } writeblock_float8((double *) v, nfloats); size_32 += 1 + nfloats * 2; size_64 += 1 + nfloats; extern_record_location(v); break; } case Abstract_tag: extern_invalid_argument("output_value: abstract value (Abstract)"); break; case Infix_tag: writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd)); extern_rec(v - Infix_offset_hd(hd)); break; case Custom_tag: { uintnat sz_32, sz_64; char * ident = Custom_ops_val(v)->identifier; void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64) = Custom_ops_val(v)->serialize; if (serialize == NULL) extern_invalid_argument("output_value: abstract value (Custom)"); Write(CODE_CUSTOM); writeblock(ident, strlen(ident) + 1); Custom_ops_val(v)->serialize(v, &sz_32, &sz_64); size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */ size_64 += 2 + ((sz_64 + 7) >> 3); extern_record_location(v); break; } default: { value field0; mlsize_t i; if (tag < 16 && sz < 8) { Write(PREFIX_SMALL_BLOCK + tag + (sz << 4)); #ifdef ARCH_SIXTYFOUR } else if (hd >= ((uintnat)1 << 32)) { writecode64(CODE_BLOCK64, Whitehd_hd (hd)); #endif } else { writecode32(CODE_BLOCK32, Whitehd_hd (hd)); } size_32 += 1 + sz; size_64 += 1 + sz; field0 = Field(v, 0); extern_record_location(v); if (sz == 1) { v = field0; } else { extern_rec(field0); for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i)); v = Field(v, i); } goto tailcall; } } } else if ((char *) v >= caml_code_area_start &&
/* Marshal the value [v] to the output buffer in the OCaml external
   (wire) format.  Iterative version: pending field ranges are kept on
   the explicit extern_stack (sp) instead of the C call stack; only
   Infix_tag recurses.  Sharing uses the block colour: a Caml_blue
   header means "already emitted", with Field(v,0) holding its ordinal
   so a relative back-reference can be written.  size_32/size_64 are
   global accumulators of the unmarshalled size on 32/64-bit targets.
   NOTE(review): the chunk visible here appears to be missing the
   function's final closing brace — confirm against the original file. */
static void extern_rec(value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    /* Immediate integer: pick the shortest encoding that fits. */
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      /* Short-circuit forwarded values unless the target could be
         confused with a forward/lazy/float block on unmarshalling. */
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      /* Emit a back-reference: distance from the current object count. */
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      /* Emit the offset, then marshal the enclosing closure block. */
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      /* Identifier is written NUL-terminated so the reader can match
         it against registered custom operations. */
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      /* Record location BEFORE descending: this overwrites Field(v,0)
         with the object counter, hence field0 is saved above. */
      extern_record_location(v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack(sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  } else if ((cf = extern_find_code((char *) v)) != NULL) {
    /* A code pointer: marshalled as an offset into its code fragment
       plus the fragment digest, if closures are permitted. */
    if (!extern_closures)
      extern_invalid_argument("output_value: functional value");
    writecode32(CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock((char *) cf->digest, 16);
  } else {
    extern_invalid_argument("output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
      /* We are done.   Cleanup the stack and leave the function */
      extern_free_stack();
      return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
/* Empty the minor heap of [domain]: promote all live minor values to
   the major heap, fix up every recorded inter-heap reference, run
   ephemeron-key and custom-block processing, then reset the minor
   allocation pointer.  CAMLnoalloc: no OCaml allocation may occur in
   this scope.  A zero header (Hd_val(v) == 0) marks a minor block that
   has been promoted, with Op_val(v)[0] holding the forwarding pointer —
   that invariant is relied on throughout below. */
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-young pointers from older to younger objects */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);

          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end)
                  && *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");

    /* Phase 1: promote everything reachable from the roots. */
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    /* Remembered set: major-to-minor references. */
    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    /* Phase 2: ephemeron keys — promoted keys are forwarded, dead keys
       are cleared together with the ephemeron's data. */
    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    /* Phase 3: rewrite the remembered major-to-minor references to the
       promoted copies.  With multiple domains a CAS is used because
       another mutator may race on the same slot. */
    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        /* Infix pointers are fixed up relative to the closure block. */
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    /* Phase 4: finalisers for dead minor custom blocks. */
    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* Block was promoted (survived). */
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");

    /* Reset all remembered-set tables for the next cycle. */
    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024,
                 rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  /* Poison the emptied minor heap so stale reads are caught. */
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
/* Context-parameterized variant of extern_rec (CAML_R carries the
   runtime context [ctx]): marshal [v] to the output buffer in external
   format, iteratively via the explicit extern_stack.  Differences from
   stock extern_rec: custom blocks may use a cross_context_serialize
   when extern_cross_context is set, and out-of-heap pointers in
   cross-context mode hit an unimplemented path.
   NOTE(review): this fork contains leftover debugging code, including
   deliberate `a /= 0` crashes (undefined behavior); flagged inline.
   NOTE(review): the chunk visible here appears to be missing the
   function's final closing brace — confirm against the original file. */
static void extern_rec_r(CAML_R, value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  //?????DUMP("QQQ 0x%lx, or %li ", v, v);
  if (Is_long(v)) {
    /* Immediate integer: shortest encoding that fits. */
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8_r(ctx, CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16_r(ctx, CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64_r(ctx, CODE_INT64, n);
#endif
    } else
      writecode32_r(ctx, CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);
    //DUMP("dumping %p, tag %i, size %i", (void*)v, (int)tag, (int)sz); // !!!!!!!!!!!!!!!
    if (tag == Forward_tag) {
      /* Short-circuit forwarded values unless the target could be
         misread on unmarshalling. */
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32_r(ctx, CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      /* Back-reference relative to the running object counter. */
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8_r(ctx, CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16_r(ctx, CODE_SHARED16, d);
      } else {
        writecode32_r(ctx, CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8_r(ctx, CODE_STRING8, len);
      } else {
        writecode32_r(ctx, CODE_STRING32, len);
      }
      writeblock_r(ctx, String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location_r(ctx, v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location_r(ctx,v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8_r(ctx, CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32_r(ctx, CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location_r(ctx, v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument_r(ctx, "output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      /* Emit the offset, then marshal the enclosing closure block. */
      writecode32_r(ctx,CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec_r(ctx, v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64);
      //printf("[object at %p, which is a %s custom: BEGIN\n", (void*)v, Custom_ops_val(v)->identifier);
      /* Cross-context marshalling uses a dedicated serializer. */
      if(extern_cross_context){
        //printf("About the object at %p, which is a %s custom: USING a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier);
        serialize = Custom_ops_val(v)->cross_context_serialize;
      }
      else{
        //printf("About the object at %p, which is a %s custom: NOT using a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier);
        serialize = Custom_ops_val(v)->serialize;
      }
      //printf("Still alive 100\n");
      if (serialize == NULL){
        //////
        //struct custom_operations *o = Custom_ops_val(v);
        //printf("About the object at %p, which is a %s custom\n", (void*)v, Custom_ops_val(v)->identifier);
        /* NOTE(review): deliberate division by zero to force a crash
           for debugging — undefined behavior; should be removed. */
        volatile int a = 1;
        a /= 0;
        ///////////
        extern_invalid_argument_r(ctx, "output_value: abstract value (Custom)");
      }
      //printf("Still alive 200\n");
      Write(CODE_CUSTOM);
      //printf("Still alive 300\n");
      /* Identifier written NUL-terminated for the reader's lookup. */
      writeblock_r(ctx, ident, strlen(ident) + 1);
      //printf("Still alive 400\n");
      serialize(v, &sz_32, &sz_64);
      //printf("Still alive 500\n");
      size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      //printf("Still alive 600\n");
      extern_record_location_r(ctx,v);
      // This temporarily breaks the object, by replacing it with a forwarding pointer
      //printf("object at %p, which is a custom: END\n", (void*)v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64_r(ctx, CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32_r(ctx, CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      /* Save Field(v,0) before extern_record_location_r overwrites it
         with the object counter. */
      field0 = Field(v, 0);
      extern_record_location_r(ctx, v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack_r(ctx, sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  } else if ((cf = extern_find_code_r(ctx, (char *) v)) != NULL) {
    /* A code pointer: offset into its fragment plus the digest. */
    if (!extern_closures){
      extern_invalid_argument_r(ctx, "output_value: functional value"); // FIXME: this is the correct version. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      //DUMP("output_value: functional value");
      /* NOTE(review): another deliberate div-by-zero debug crash (UB);
         presumably unreachable if the call above raises — confirm. */
      {volatile int a = 1; a /= 0;}
    }
    //fprintf(stderr, "ZZZZ dumping a code pointer: BEGIN\n");
    //DUMP("dumping a code pointer 0x%lx, or %li; code start is at %p", v, v, cf->code_start);
    writecode32_r(ctx, CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock_r(ctx, (char *) cf->digest, 16);
    //dump_digest(cf->digest);
    //fprintf(stderr, "ZZZZ dumping a code pointer: END\n");
  } else {
    /* Pointer outside heap and code area. */
    if(extern_cross_context){
      fprintf(stderr, "ZZZZ working on the external pointer: %p, which is to say %li [cf is %p]\n", (void*)v, (long)v, cf);
      //fprintf(stderr, "ZZZZ I'm doing a horrible, horrible thing: serializing the pointer as a tagged 0.\n");
      DUMP("about to crash in the strange case I'm debugging");
      /* DUMP("the object is 0x%lx, or %li ", v, v); */
      /* DUMP("probably crashing now"); */
      /* DUMP("tag is %i", (int)Tag_val(v)); */
      /* DUMP("size is %i", (int)Wosize_val(v)); */
      //volatile int a = 1; a /= 0;
      //extern_rec_r(ctx, Val_int(0));
      /* fprintf(stderr, "ZZZZ [This is probably wrong: I'm marshalling an out-of-heap pointer as an int64]\n"); */
      /* writecode64_r(ctx, CODE_INT64, (v << 1) | 1); */
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap) [FIXME: implement]");
    }
    else
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
      /* We are done.   Cleanup the stack and leave the function */
      extern_free_stack_r(ctx);
      return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
/* Structural comparison of two OCaml values.
   Returns a negative intnat if [v1 < v2], 0 if equal, positive if
   [v1 > v2], or [UNORDERED] (when [total] is false) if a custom
   comparison reported an unordered pair (e.g. NaN under [compare]).
   Iterative with an explicit stack ([compare_stack]) to avoid
   C-stack overflow on deep structures.
   NOTE(review): relies on runtime invariants — [Forward_tag] blocks
   are transparently followed, [Infix_tag]/[Closure_tag] values are
   rejected, and [Custom_tag] blocks are compared via their ops. */
static intnat compare_val(value v1, value v2, int total)
{
  struct compare_item * sp;
  tag_t t1, t2;

  /* Lazily allocate the explicit comparison stack. */
  if (!compare_stack) compare_init_stack();

  sp = compare_stack;
  while (1) {
    /* Physical equality implies structural equality only for total
       comparison: for partial comparison the value could be NaN,
       which must still go through the float path. */
    if (v1 == v2 && total) goto next_item;
    if (Is_long(v1)) {
      if (v1 == v2) goto next_item;
      if (Is_long(v2))
        return Long_val(v1) - Long_val(v2);
        /* Subtraction above cannot overflow and cannot result in
           UNORDERED: both operands fit in (word size - 1) bits, so the
           difference fits in a full word. */
      /* v1 is an immediate but v2 is a block: follow forwarding, or
         let a custom block compare itself against the immediate. */
      switch (Tag_val(v2)) {
      case Forward_tag:
        v2 = Forward_val(v2);
        continue;
      case Custom_tag: {
        int res;
        int (*compare)(value v1, value v2) = Custom_ops_val(v2)->compare_ext;
        if (compare == NULL) break;  /* for backward compatibility */
        caml_compare_unordered = 0;
        res = compare(v1, v2);
        if (caml_compare_unordered && !total) return UNORDERED;
        if (res != 0) return res;
        goto next_item;
      }
      default: /*fallthrough*/;
      }
      return LESS;                /* v1 long < v2 block */
    }
    if (Is_long(v2)) {
      /* Mirror image of the case above: v1 is a block, v2 immediate. */
      switch (Tag_val(v1)) {
      case Forward_tag:
        v1 = Forward_val(v1);
        continue;
      case Custom_tag: {
        int res;
        int (*compare)(value v1, value v2) = Custom_ops_val(v1)->compare_ext;
        if (compare == NULL) break;  /* for backward compatibility */
        caml_compare_unordered = 0;
        res = compare(v1, v2);
        if (caml_compare_unordered && !total) return UNORDERED;
        if (res != 0) return res;
        goto next_item;
      }
      default: /*fallthrough*/;
      }
      return GREATER;             /* v1 block > v2 long */
    }
    /* Both v1 and v2 are blocks; short-circuit forwarding pointers
       before comparing tags. */
    t1 = Tag_val(v1);
    t2 = Tag_val(v2);
    if (t1 == Forward_tag) { v1 = Forward_val (v1); continue; }
    if (t2 == Forward_tag) { v2 = Forward_val (v2); continue; }
    if (t1 != t2) return (intnat)t1 - (intnat)t2;
    switch (t1) {
    case String_tag: {
      mlsize_t len1, len2;
      int res;
      if (v1 == v2) break;
      len1 = caml_string_length(v1);
      len2 = caml_string_length(v2);
      res = memcmp(String_val(v1), String_val(v2),
                   len1 <= len2 ? len1 : len2);
      if (res < 0) return LESS;
      if (res > 0) return GREATER;
      /* FIX: was [return len1 - len2] — an unsigned (mlsize_t)
         difference whose conversion to intnat is implementation-
         defined when len1 < len2.  Return an explicit ordering
         instead; callers only look at the sign. */
      if (len1 != len2) return len1 < len2 ? LESS : GREATER;
      break;
    }
    case Double_tag: {
      double d1 = Double_val(v1);
      double d2 = Double_val(v2);
      if (d1 < d2) return LESS;
      if (d1 > d2) return GREATER;
      if (d1 != d2) {
        if (! total) return UNORDERED;
        /* One or both of d1 and d2 is NaN.  Order according to the
           convention NaN = NaN and NaN < f for all other floats f. */
        if (d1 == d1) return GREATER; /* d1 is not NaN, d2 is NaN */
        if (d2 == d2) return LESS;    /* d2 is not NaN, d1 is NaN */
        /* d1 and d2 are both NaN, thus equal: continue comparison */
      }
      break;
    }
    case Double_array_tag: {
      mlsize_t sz1 = Wosize_val(v1) / Double_wosize;
      mlsize_t sz2 = Wosize_val(v2) / Double_wosize;
      mlsize_t i;
      /* FIX: was [return sz1 - sz2] — same implementation-defined
         unsigned wrap as the String_tag case. */
      if (sz1 != sz2) return sz1 < sz2 ? LESS : GREATER;
      for (i = 0; i < sz1; i++) {
        double d1 = Double_field(v1, i);
        double d2 = Double_field(v2, i);
        if (d1 < d2) return LESS;
        if (d1 > d2) return GREATER;
        if (d1 != d2) {
          if (! total) return UNORDERED;
          /* See comment for Double_tag case */
          if (d1 == d1) return GREATER;
          if (d2 == d2) return LESS;
        }
      }
      break;
    }
    case Abstract_tag:
      compare_free_stack();
      caml_invalid_argument("equal: abstract value");
    case Closure_tag:
    case Infix_tag:
      compare_free_stack();
      caml_invalid_argument("equal: functional value");
    case Object_tag: {
      /* Objects compare by unique object id, not by structure. */
      intnat oid1 = Oid_val(v1);
      intnat oid2 = Oid_val(v2);
      if (oid1 != oid2) return oid1 - oid2;
      break;
    }
    case Custom_tag: {
      int res;
      int (*compare)(value v1, value v2) = Custom_ops_val(v1)->compare;
      /* Hardening against comparisons between different types */
      if (compare != Custom_ops_val(v2)->compare) {
        return strcmp(Custom_ops_val(v1)->identifier,
                      Custom_ops_val(v2)->identifier) < 0
               ? LESS : GREATER;
      }
      if (compare == NULL) {
        compare_free_stack();
        caml_invalid_argument("equal: abstract value");
      }
      caml_compare_unordered = 0;
      res = compare(v1, v2);
      if (caml_compare_unordered && !total) return UNORDERED;
      if (res != 0) return res;
      break;
    }
    default: {
      /* Ordinary structured block: compare sizes first for speed,
         then walk the fields. */
      mlsize_t sz1 = Wosize_val(v1);
      mlsize_t sz2 = Wosize_val(v2);
      /* FIX: was [return sz1 - sz2] — see String_tag case. */
      if (sz1 != sz2) return sz1 < sz2 ? LESS : GREATER;
      if (sz1 == 0) break;
      /* Remember that we still have to compare fields 1 ... sz - 1 */
      if (sz1 > 1) {
        sp++;
        if (sp >= compare_stack_limit) sp = compare_resize_stack(sp);
        sp->v1 = Op_val(v1) + 1;
        sp->v2 = Op_val(v2) + 1;
        sp->count = sz1 - 1;
      }
      /* Continue comparison with first field */
      v1 = Field(v1, 0);
      v2 = Field(v2, 0);
      continue;
    }
    }
  next_item:
    /* Pop one more item to compare, if any */
    if (sp == compare_stack) return EQUAL;      /* we're done */
    v1 = *((sp->v1)++);
    v2 = *((sp->v2)++);
    if (--(sp->count) == 0) sp--;
  }
}