static void realloc_generic_table (struct generic_table *tbl,
                                   asize_t element_size,
                                   char *msg_intr_int, char *msg_threshold,
                                   char *msg_growing, char *msg_error)
{
  CAMLassert (tbl->ptr == tbl->limit);
  CAMLassert (tbl->limit <= tbl->end);
  CAMLassert (tbl->limit >= tbl->threshold);

  if (tbl->base == NULL){
    alloc_generic_table (tbl, Caml_state->minor_heap_wsz / 8, 256,
                         element_size);
  }else if (tbl->limit == tbl->threshold){
    CAML_INSTR_INT (msg_intr_int, 1);
    caml_gc_message (0x08, msg_threshold, 0);
    tbl->limit = tbl->end;
    caml_urge_major_slice ();
  }else{
    asize_t sz;
    asize_t cur_ptr = tbl->ptr - tbl->base;

    tbl->size *= 2;
    sz = (tbl->size + tbl->reserve) * element_size;
    caml_gc_message (0x08, msg_growing, (intnat) sz/1024);
    tbl->base = caml_stat_resize_noexc (tbl->base, sz);
    if (tbl->base == NULL){
      caml_fatal_error (msg_error);
    }
    tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
    tbl->threshold = tbl->base + tbl->size * element_size;
    tbl->ptr = tbl->base + cur_ptr;
    tbl->limit = tbl->end;
  }
}
void caml_free_minor_tables(struct caml_minor_tables* r)
{
  CAMLassert(r->major_ref.ptr == r->major_ref.base);
  CAMLassert(r->minor_ref.ptr == r->minor_ref.base);

  reset_table((struct generic_table *)&r->major_ref);
  reset_table((struct generic_table *)&r->minor_ref);
  reset_table((struct generic_table *)&r->ephe_ref);
  reset_table((struct generic_table *)&r->custom);
  caml_stat_free(r);
}
CAMLexport value caml_alloc (mlsize_t wosize, tag_t tag)
{
  value result;
  mlsize_t i;

  CAMLassert (tag < 256);
  CAMLassert (tag != Infix_tag);
  if (wosize == 0){
    result = Atom (tag);
  } else if (wosize <= Max_young_wosize) {
    Alloc_small (result, wosize, tag, { caml_handle_gc_interrupt(); });
    if (tag < No_scan_tag){
      for (i = 0; i < wosize; i++) Op_val(result)[i] = Val_unit;
    }
  } else {
    result = caml_alloc_shr (wosize, tag);
    if (tag < No_scan_tag){
      for (i = 0; i < wosize; i++) Op_val(result)[i] = Val_unit;
    }
    result = caml_check_urgent_gc (result);
  }
  return result;
}
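/* Usage sketch (not part of this file; [make_pair] is a hypothetical stub):
   the typical caller-side pattern for caml_alloc.  CAMLparam/CAMLlocal keep
   the arguments and result rooted across the allocation, and Store_field
   fills in the fresh block. */
#include <caml/mlvalues.h>
#include <caml/memory.h>
#include <caml/alloc.h>

CAMLprim value make_pair(value a, value b)
{
  CAMLparam2(a, b);
  CAMLlocal1(p);
  p = caml_alloc(2, 0);     /* two fields, tag 0: a tuple/record */
  Store_field(p, 0, a);
  Store_field(p, 1, b);
  CAMLreturn(p);
}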
CAMLprim value caml_runtime_parameters (value unit)
{
  CAMLassert (unit == Val_unit);
  return caml_alloc_sprintf
    ("a=%d,b=%d,H=%lu,i=%lu,l=%lu,o=%lu,O=%lu,p=%d,s=%lu,t=%d,v=%lu,w=%d,W=%lu",
     /* a */ caml_allocation_policy,
     /* b */ caml_backtrace_active,
     /* h */ /* missing */ /* FIXME add when changed to min_heap_size */
     /* H */ caml_use_huge_pages,
     /* i */ caml_major_heap_increment,
#ifdef NATIVE_CODE
     /* l */ 0,
#else
     /* l */ caml_max_stack_size,
#endif
     /* o */ caml_percent_free,
     /* O */ caml_percent_max,
     /* p */ caml_parser_trace,
     /* R */ /* missing */
     /* s */ caml_minor_heap_wsz,
     /* t */ caml_trace_level,
     /* v */ caml_verb_gc,
     /* w */ caml_major_window,
     /* W */ caml_runtime_warnings
     );
}
/* [size] is a number of bytes */
CAMLexport value caml_alloc_custom(struct custom_operations * ops,
                                   uintnat size,
                                   mlsize_t mem,
                                   mlsize_t max)
{
  mlsize_t wosize;
  value result;

  wosize = 1 + (size + sizeof(value) - 1) / sizeof(value);
  if (wosize <= Max_young_wosize) {
    result = caml_alloc_small(wosize, Custom_tag);
    Custom_ops_val(result) = ops;
    if (ops->finalize != NULL) {
      /* Remember that the block has a finalizer */
      if (caml_finalize_table.ptr >= caml_finalize_table.limit){
        CAMLassert (caml_finalize_table.ptr == caml_finalize_table.limit);
        caml_realloc_ref_table (&caml_finalize_table);
      }
      *caml_finalize_table.ptr++ = (value *)result;
    }
  } else {
    result = caml_alloc_shr(wosize, Custom_tag);
    Custom_ops_val(result) = ops;
    caml_adjust_gc_speed(mem, max);
    result = caml_check_urgent_gc(result);
  }
  return result;
}
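/* Usage sketch (hypothetical names, not from this file): wrapping a C
   pointer in a custom block so the GC runs [free] when the block dies.
   Passing mem = 0, max = 1 tells caml_adjust_gc_speed that the block holds
   no significant out-of-heap memory. */
#include <stdlib.h>
#include <caml/mlvalues.h>
#include <caml/alloc.h>
#include <caml/custom.h>

#define Handle_val(v) (*(void **) Data_custom_val(v))

static void handle_finalize(value v)
{
  free(Handle_val(v));
}

static struct custom_operations handle_ops = {
  "example.handle",          /* identifier for serialization */
  handle_finalize,           /* run when the block is collected */
  custom_compare_default,
  custom_hash_default,
  custom_serialize_default,
  custom_deserialize_default
};

value wrap_handle(void *p)
{
  value v = caml_alloc_custom(&handle_ops, sizeof(void *), 0, 1);
  Handle_val(v) = p;
  return v;
}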
int caml_alloc_backtrace_buffer(void)
{
  CAMLassert(Caml_state->backtrace_pos == 0);
  Caml_state->backtrace_buffer =
    caml_stat_alloc_noexc(BACKTRACE_BUFFER_SIZE * sizeof(code_t));
  if (Caml_state->backtrace_buffer == NULL) return -1;
  return 0;
}
CAMLexport value caml_callbackN_exn(value closure, int narg, value args[])
{
  CAMLparam0();
  struct stack_info* parent_stack;
  int i;
  value res;
  caml_domain_state* domain_state = Caml_state;

  parent_stack = Stack_parent(domain_state->current_stack);
  Stack_parent(domain_state->current_stack) = NULL;

  CAMLassert(narg + 4 <= 256);
  domain_state->current_stack->sp -= narg + 4;
  for (i = 0; i < narg; i++)
    domain_state->current_stack->sp[i] = args[i];              /* arguments */

  opcode_t code[7] = {
    callback_code[0], narg + 3,
    callback_code[2], narg,
    callback_code[4], callback_code[5], callback_code[6]
  };

  domain_state->current_stack->sp[narg] = Val_pc (code + 4); /* return address */
  domain_state->current_stack->sp[narg + 1] = Val_unit;      /* environment */
  domain_state->current_stack->sp[narg + 2] = Val_long(0);   /* extra args */
  domain_state->current_stack->sp[narg + 3] = closure;
  res = caml_interprete(code, sizeof(code));
  if (Is_exception_result(res))
    domain_state->current_stack->sp += narg + 4; /* PR#1228 */

  CAMLassert(Stack_parent(domain_state->current_stack) == NULL);
  Stack_parent(domain_state->current_stack) = parent_stack;
  CAMLreturn (res);
}
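/* Usage sketch (hypothetical names): calling an OCaml closure registered
   under "on_event" with three arguments. The _exn variant returns an
   encoded exception result instead of unwinding through C frames, so the
   caller must test Is_exception_result. */
#include <caml/mlvalues.h>
#include <caml/callback.h>

int fire_event(value a, value b, value c)
{
  const value *f = caml_named_value("on_event");
  value args[3] = { a, b, c };
  value res;

  if (f == NULL) return -1;                 /* nothing registered */
  res = caml_callbackN_exn(*f, 3, args);
  if (Is_exception_result(res)) return -1;  /* Extract_exception(res) if needed */
  return 0;
}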
static struct ev_info *process_debug_events(code_t code_start,
                                            value events_heap,
                                            mlsize_t *num_events)
{
  CAMLparam1(events_heap);
  CAMLlocal3(l, ev, ev_start);
  mlsize_t i, j;
  struct ev_info *events;

  /* Compute the size of the required event buffer. */
  *num_events = 0;
  for (i = 0; i < caml_array_length(events_heap); i++)
    for (l = Field_imm(events_heap, i); l != Val_int(0); l = Field_imm(l, 1))
      (*num_events)++;

  if (*num_events == 0)
    CAMLreturnT(struct ev_info *, NULL);

  events = caml_stat_alloc_noexc(*num_events * sizeof(struct ev_info));
  if(events == NULL)
    caml_fatal_error ("caml_add_debug_info: out of memory");

  j = 0;
  for (i = 0; i < caml_array_length(events_heap); i++) {
    for (l = Field_imm(events_heap, i); l != Val_int(0); l = Field_imm(l, 1)) {
      ev = Field_imm(l, 0);

      events[j].ev_pc =
        (code_t)((char*)code_start + Long_val(Field_imm(ev, EV_POS)));

      ev_start = Field(Field(ev, EV_LOC), LOC_START);

      {
        uintnat fnsz = caml_string_length(Field_imm(ev_start, POS_FNAME)) + 1;
        events[j].ev_filename = (char*)caml_stat_alloc_noexc(fnsz);
        if(events[j].ev_filename == NULL)
          caml_fatal_error ("caml_add_debug_info: out of memory");
        memcpy(events[j].ev_filename,
               String_val(Field(ev_start, POS_FNAME)), fnsz);
      }

      events[j].ev_lnum = Int_val(Field(ev_start, POS_LNUM));
      events[j].ev_startchr =
        Int_val(Field(ev_start, POS_CNUM))
        - Int_val(Field(ev_start, POS_BOL));
      events[j].ev_endchr =
        Int_val(Field(Field(Field(ev, EV_LOC), LOC_END), POS_CNUM))
        - Int_val(Field(ev_start, POS_BOL));

      j++;
    }
  }

  CAMLassert(j == *num_events);

  qsort(events, *num_events, sizeof(struct ev_info), cmp_ev_info);

  CAMLreturnT(struct ev_info *, events);
}
CAMLprim value caml_gc_minor(value v)
{
  CAML_INSTR_SETUP (tmr, "");
  CAMLassert (v == Val_unit);
  caml_minor_collection ();
  CAML_INSTR_TIME (tmr, "explicit/gc_minor");
  return Val_unit;
}
CAMLprim value caml_runtime_parameters (value unit)
{
#define F_Z ARCH_INTNAT_PRINTF_FORMAT
#define F_S ARCH_SIZET_PRINTF_FORMAT

  CAMLassert (unit == Val_unit);
  /* TODO KC */
  return caml_alloc_sprintf ("caml_runtime_parameters not implemented: %d", 0);
}
static value next_minor_block(caml_domain_state* domain_state, value curr_hp)
{
  mlsize_t wsz;
  header_t hd;
  value curr_val;
  CAMLassert ((value)domain_state->young_ptr <= curr_hp);
  CAMLassert (curr_hp < (value)domain_state->young_end);
  hd = Hd_hp(curr_hp);
  curr_val = Val_hp(curr_hp);
  if (hd == 0) {
    /* Forwarded object, find the promoted version */
    curr_val = Op_val(curr_val)[0];
  }
  CAMLassert (Is_block(curr_val) && Hd_val(curr_val) != 0
              && Tag_val(curr_val) != Infix_tag);
  wsz = Wosize_val(curr_val);
  CAMLassert (wsz <= Max_young_wosize);
  return curr_hp + Bsize_wsize(Whsize_wosize(wsz));
}
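/* Sketch of the intended iteration pattern (runtime-internal; assumes it
   runs during a minor collection with no mutator active): the whole minor
   heap can be walked block by block from young_ptr up to young_end. */
static void walk_minor_heap(caml_domain_state* domain_state)
{
  value hp = (value) domain_state->young_ptr;
  while (hp < (value) domain_state->young_end) {
    /* Hd_hp(hp) == 0 marks an already-promoted block; next_minor_block
       follows the forwarding pointer to read the size of the copy. */
    hp = next_minor_block(domain_state, hp);
  }
}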
/* If [*v] points to an [Infix_tag] header, [*v] is updated to point to the
 * beginning of the enclosing closure block. */
static inline void resolve_infix_val (value* v)
{
  int offset = 0;
  if (Hd_val(*v) == Infix_tag) {
    offset = Infix_offset_val(*v);
    CAMLassert (offset > 0);
    *v -= offset;
  }
}
CAMLexport CAMLweakdef void caml_modify (value *fp, value val)
{
  /* The write barrier implemented by [caml_modify] checks for the
     following two conditions and takes appropriate action:
     1- a pointer from the major heap to the minor heap is created
        --> add [fp] to the remembered set
     2- a pointer from the major heap to the major heap is overwritten,
        while the GC is in the marking phase
        --> call [caml_darken] on the overwritten pointer so that the
            major GC treats it as an additional root.
  */
  value old;

  if (Is_young((value)fp)) {
    /* The modified object resides in the minor heap.
       Conditions 1 and 2 cannot occur. */
    *fp = val;
  } else {
    /* The modified object resides in the major heap. */
    CAMLassert(Is_in_heap(fp));
    old = *fp;
    *fp = val;
    if (Is_block(old)) {
      /* If [old] is a pointer within the minor heap, we already
         have a major->minor pointer and [fp] is already in the
         remembered set.  Conditions 1 and 2 cannot occur. */
      if (Is_young(old)) return;
      /* Here, [old] can be a pointer within the major heap.
         Check for condition 2. */
      if (caml_gc_phase == Phase_mark) caml_darken(old, NULL);
    }
    /* Check for condition 1. */
    if (Is_block(val) && Is_young(val)) {
      /* Add [fp] to remembered set */
      if (caml_ref_table.ptr >= caml_ref_table.limit){
        CAMLassert (caml_ref_table.ptr == caml_ref_table.limit);
        caml_realloc_ref_table (&caml_ref_table);
      }
      *caml_ref_table.ptr++ = fp;
    }
  }
}
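/* Usage sketch: C code mutating an existing field must go through
   caml_modify (the Store_field macro expands to it) so the barrier above
   runs; a raw assignment would silently corrupt the remembered set. */
#include <caml/mlvalues.h>
#include <caml/memory.h>

void set_ref_contents(value r, value v)
{
  caml_modify(&Field(r, 0), v);  /* correct */
  /* Field(r, 0) = v;               wrong: bypasses the write barrier */
}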
CAMLprim value caml_runtime_variant (value unit)
{
  CAMLassert (unit == Val_unit);
#if defined (DEBUG)
  return caml_copy_string ("d");
#elif defined (CAML_INSTR)
  return caml_copy_string ("i");
#else
  return caml_copy_string ("");
#endif
}
/* PR#6084 workaround: define it as a weak symbol */
CAMLexport CAMLweakdef void caml_initialize (value *fp, value val)
{
  CAMLassert(Is_in_heap(fp));
  *fp = val;
  if (Is_block (val) && Is_young (val)) {
    if (caml_ref_table.ptr >= caml_ref_table.limit){
      caml_realloc_ref_table (&caml_ref_table);
    }
    *caml_ref_table.ptr++ = fp;
  }
}
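/* Usage sketch (hypothetical helper): caml_initialize is for the *first*
   write to a field of a block allocated directly in the major heap (e.g.
   with caml_alloc_shr); unlike caml_modify it may assume no previous valid
   value is being overwritten. */
value alloc_filled_block(mlsize_t wosize, value fill)
{
  mlsize_t i;
  value b = caml_alloc_shr(wosize, 0);
  for (i = 0; i < wosize; i++)
    caml_initialize(&Field(b, i), fill);
  return caml_check_urgent_gc(b);
}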
CAMLprim value caml_gc_major_slice (value v)
{
  intnat res;
  CAMLassert (Is_long (v));
  caml_ev_pause(EV_PAUSE_GC);
  caml_empty_minor_heap ();
  res = caml_major_collection_slice(Long_val(v), 0);
  caml_ev_resume();
  caml_handle_gc_interrupt();
  return Val_long (res);
}
static void read_main_debug_info(struct debug_info *di)
{
  CAMLparam0();
  CAMLlocal3(events, evl, l);
  char_os *exec_name;
  int fd, num_events, orig, i;
  struct channel *chan;
  struct exec_trailer trail;

  CAMLassert(di->already_read == 0);
  di->already_read = 1;

  if (caml_params->cds_file != NULL) {
    exec_name = (char_os*) caml_params->cds_file;
  } else {
    exec_name = (char_os*) caml_params->exe_name;
  }

  fd = caml_attempt_open(&exec_name, &trail, 1);
  if (fd < 0){
    caml_fatal_error ("executable program file not found");
    CAMLreturn0;
  }

  caml_read_section_descriptors(fd, &trail);
  if (caml_seek_optional_section(fd, &trail, "DBUG") != -1) {
    chan = caml_open_descriptor_in(fd);

    num_events = caml_getword(chan);
    events = caml_alloc(num_events, 0);

    for (i = 0; i < num_events; i++) Op_val(events)[i] = Val_unit;

    for (i = 0; i < num_events; i++) {
      orig = caml_getword(chan);
      evl = caml_input_val(chan);
      caml_input_val(chan); /* Skip the list of absolute directory names */
      /* Relocate events in event list */
      for (l = evl; l != Val_int(0); l = Field_imm(l, 1)) {
        value ev = Field_imm(l, 0);
        Store_field (ev, EV_POS,
                     Val_long(Long_val(Field(ev, EV_POS)) + orig));
      }
      /* Record event list */
      Store_field(events, i, evl);
    }

    caml_close_channel(chan);

    di->events =
      process_debug_events(caml_start_code, events, &di->num_events);
  }

  CAMLreturn0;
}
CAMLprim value caml_get_major_bucket (value v)
{
  long i = Long_val (v);
  if (i < 0) caml_invalid_argument ("Gc.get_bucket");
  if (i < caml_major_window){
    i += caml_major_ring_index;
    if (i >= caml_major_window) i -= caml_major_window;
    CAMLassert (0 <= i && i < caml_major_window);
    return Val_long ((long) (caml_major_ring[i] * 1e6));
  }else{
    return Val_long (0);
  }
}
CAMLexport void caml_ba_serialize(value v,
                                  uintnat * wsize_32,
                                  uintnat * wsize_64)
{
  struct caml_ba_array * b = Caml_ba_array_val(v);
  intnat num_elts;
  int i;

  /* Serialize header information */
  caml_serialize_int_4(b->num_dims);
  caml_serialize_int_4(b->flags & (CAML_BA_KIND_MASK | CAML_BA_LAYOUT_MASK));
  /* On a 64-bit machine, if any of the dimensions is >= 2^32,
     the size of the marshaled data will be >= 2^32 and
     extern_value() will fail.  So, it is safe to write
     the dimensions as 32-bit unsigned integers. */
  for (i = 0; i < b->num_dims; i++) caml_serialize_int_4(b->dim[i]);
  /* Compute total number of elements */
  num_elts = 1;
  for (i = 0; i < b->num_dims; i++) num_elts = num_elts * b->dim[i];
  /* Serialize elements */
  switch (b->flags & CAML_BA_KIND_MASK) {
  case CAML_BA_CHAR:
  case CAML_BA_SINT8:
  case CAML_BA_UINT8:
    caml_serialize_block_1(b->data, num_elts); break;
  case CAML_BA_SINT16:
  case CAML_BA_UINT16:
    caml_serialize_block_2(b->data, num_elts); break;
  case CAML_BA_FLOAT32:
  case CAML_BA_INT32:
    caml_serialize_block_4(b->data, num_elts); break;
  case CAML_BA_COMPLEX32:
    caml_serialize_block_4(b->data, num_elts * 2); break;
  case CAML_BA_FLOAT64:
  case CAML_BA_INT64:
    caml_serialize_block_8(b->data, num_elts); break;
  case CAML_BA_COMPLEX64:
    caml_serialize_block_8(b->data, num_elts * 2); break;
  case CAML_BA_CAML_INT:
    caml_ba_serialize_longarray(b->data, num_elts, -0x40000000, 0x3FFFFFFF);
    break;
  case CAML_BA_NATIVE_INT:
    caml_ba_serialize_longarray(b->data, num_elts, -0x80000000, 0x7FFFFFFF);
    break;
  }
  /* Compute required size in OCaml heap.  Assumes struct caml_ba_array
     is exactly 4 + num_dims words */
  CAMLassert(SIZEOF_BA_ARRAY == 4 * sizeof(value));
  *wsize_32 = (4 + b->num_dims) * 4;
  *wsize_64 = (4 + b->num_dims) * 8;
}
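/* Illustration (hypothetical custom block, not bigarray code): the
   wsize_32/wsize_64 outputs tell the deserializer how many bytes of
   Data_custom_val storage to allocate on 32- and 64-bit platforms.  For a
   block holding a single int64, the pair could look like this: */
#include <stdint.h>
#include <caml/mlvalues.h>
#include <caml/intext.h>

static void int64_box_serialize(value v, uintnat *wsize_32, uintnat *wsize_64)
{
  caml_serialize_int_8(*(int64_t *) Data_custom_val(v));
  *wsize_32 = 8;   /* bytes of payload on 32-bit platforms */
  *wsize_64 = 8;   /* ... and on 64-bit platforms */
}

static uintnat int64_box_deserialize(void *dst)
{
  *(int64_t *) dst = caml_deserialize_sint_8();
  return 8;        /* bytes written into the custom payload */
}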
CAMLexport value caml_ba_alloc_dims(int flags, int num_dims, void * data, ...)
{
  va_list ap;
  intnat dim[CAML_BA_MAX_NUM_DIMS];
  int i;
  value res;

  CAMLassert(num_dims <= CAML_BA_MAX_NUM_DIMS);
  va_start(ap, data);
  for (i = 0; i < num_dims; i++) dim[i] = va_arg(ap, intnat);
  va_end(ap);
  res = caml_ba_alloc(flags, num_dims, data, dim);
  return res;
}
/* [caml_ba_alloc] will allocate a new bigarray object in the heap.
   If [data] is NULL, the memory for the contents is also allocated
   (with [malloc]) by [caml_ba_alloc].
   [data] cannot point into the OCaml heap.
   [dim] may point into an object in the OCaml heap.
*/
CAMLexport value
caml_ba_alloc(int flags, int num_dims, void * data, intnat * dim)
{
  uintnat num_elts, asize, size;
  int i;
  value res;
  struct caml_ba_array * b;
  intnat dimcopy[CAML_BA_MAX_NUM_DIMS];

  CAMLassert(num_dims >= 0 && num_dims <= CAML_BA_MAX_NUM_DIMS);
  CAMLassert((flags & CAML_BA_KIND_MASK) <= CAML_BA_CHAR);
  for (i = 0; i < num_dims; i++) dimcopy[i] = dim[i];
  size = 0;
  if (data == NULL) {
    num_elts = 1;
    for (i = 0; i < num_dims; i++) {
      if (caml_umul_overflow(num_elts, dimcopy[i], &num_elts))
        caml_raise_out_of_memory();
    }
    if (caml_umul_overflow(num_elts,
                           caml_ba_element_size[flags & CAML_BA_KIND_MASK],
                           &size))
      caml_raise_out_of_memory();
    data = malloc(size);
    if (data == NULL && size != 0) caml_raise_out_of_memory();
    flags |= CAML_BA_MANAGED;
  }
  asize = SIZEOF_BA_ARRAY + num_dims * sizeof(intnat);
  res = caml_alloc_custom(&caml_ba_ops, asize, size, CAML_BA_MAX_MEMORY);
  b = Caml_ba_array_val(res);
  b->data = data;
  b->num_dims = num_dims;
  b->flags = flags;
  b->proxy = NULL;
  for (i = 0; i < num_dims; i++) b->dim[i] = dimcopy[i];
  return res;
}
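/* Usage sketch (hypothetical wrapper): exposing an existing C buffer of
   rows*cols doubles as a 2-D, C-layout bigarray via the vararg wrapper
   above.  Since CAML_BA_MANAGED is not set, the buffer stays owned by the
   C side and must outlive the bigarray. */
#include <caml/bigarray.h>

value wrap_matrix(double *buf, intnat rows, intnat cols)
{
  return caml_ba_alloc_dims(CAML_BA_FLOAT64 | CAML_BA_C_LAYOUT, 2,
                            buf, rows, cols);
}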
static void do_set (value ar, mlsize_t offset, value v)
{
  if (Is_block (v) && Is_young (v)){
    /* modified version of Modify */
    value old = Field (ar, offset);
    Field (ar, offset) = v;
    if (!(Is_block (old) && Is_young (old))){
      if (caml_weak_ref_table.ptr >= caml_weak_ref_table.limit){
        CAMLassert (caml_weak_ref_table.ptr == caml_weak_ref_table.limit);
        caml_realloc_ref_table (&caml_weak_ref_table);
      }
      *caml_weak_ref_table.ptr++ = &Field (ar, offset);
    }
  }else{
    Field (ar, offset) = v;
  }
}
CAMLexport const value* caml_named_value(char const *name)
{
  struct named_value * nv;
  caml_root ret = NULL;
  caml_plat_lock(&named_value_lock);
  for (nv = named_value_table[hash_value_name(name)];
       nv != NULL;
       nv = nv->next) {
    if (strcmp(name, nv->name) == 0){
      ret = nv->val;
      break;
    }
  }
  caml_plat_unlock(&named_value_lock);
  /* *ret should never be a minor object, since caml_create_root promotes */
  CAMLassert (!(ret && Is_minor(caml_read_root(ret))));
  return Op_val(ret);
}
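/* Usage sketch (hypothetical names): the OCaml side registers a handler
   once with
     let () = Callback.register "error_handler" handler
   and C looks it up lazily.  The returned pointer is stable, so caching it
   in a static is the usual pattern; the value it points to may still
   change if the name is re-registered. */
#include <caml/mlvalues.h>
#include <caml/callback.h>

void report_error(value msg)
{
  static const value *handler = NULL;
  if (handler == NULL) handler = caml_named_value("error_handler");
  if (handler != NULL) caml_callback(*handler, msg);
}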
int caml_write_fd(int fd, int flags, void * buf, int n)
{
  int retcode;
again:
  caml_enter_blocking_section();
  retcode = write(fd, buf, n);
  caml_leave_blocking_section();
  if (retcode == -1) {
    if (errno == EINTR) goto again;
    if ((errno == EAGAIN || errno == EWOULDBLOCK) && n > 1) {
      /* We couldn't do a partial write here, probably because
         n <= PIPE_BUF and POSIX says that writes of less than
         PIPE_BUF characters must be atomic.
         We first try again with a partial write of 1 character.
         If that fails too, we'll return an error code. */
      n = 1;
      goto again;
    }
  }
  if (retcode == -1) caml_sys_io_error(NO_ARG);
  CAMLassert (retcode > 0);
  return retcode;
}
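/* Caller sketch (hypothetical helper): caml_write_fd can return a short
   count (possibly a single byte after the EAGAIN fallback above), so
   writing a whole buffer means looping until it is drained. */
void write_all(int fd, char *buf, int len)
{
  while (len > 0) {
    int written = caml_write_fd(fd, 0, buf, len); /* flags unused above */
    buf += written;
    len -= written;
  }
}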
void forward_pointer (void* state, value v, value *p)
{
  header_t hd;
  mlsize_t offset;
  value fwd;
  struct domain* promote_domain = state;
  caml_domain_state* domain_state =
    promote_domain ? promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;

  if (Is_block (v) &&
      is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
    hd = Hd_val(v);
    if (hd == 0) {
      *p = Op_val(v)[0];
      CAMLassert (Is_block(*p) && !Is_minor(*p));
    } else if (Tag_hd(hd) == Infix_tag) {
      offset = Infix_offset_hd(hd);
      fwd = 0;
      forward_pointer (state, v - offset, &fwd);
      if (fwd) *p = fwd + offset;
    }
  }
}
CAMLexport void caml_ba_finalize(value v)
{
  struct caml_ba_array * b = Caml_ba_array_val(v);

  switch (b->flags & CAML_BA_MANAGED_MASK) {
  case CAML_BA_EXTERNAL:
    break;
  case CAML_BA_MANAGED:
    if (b->proxy == NULL) {
      free(b->data);
    } else {
      if (-- b->proxy->refcount == 0) {
        free(b->proxy->data);
        free(b->proxy);
      }
    }
    break;
  case CAML_BA_MAPPED_FILE:
    /* Bigarrays for mapped files use a different finalization method */
  default:
    CAMLassert(0);
  }
}
CAMLprim value caml_get_major_credit (value v)
{
  CAMLassert (v == Val_unit);
  return Val_long ((long) (caml_major_work_credit * 1e6));
}
void caml_alloc_point_here(void)
{
  CAMLassert(noalloc_level == 0);
}
void caml_noalloc_end(int* noalloc)
{
  int curr = --noalloc_level;
  CAMLassert(*noalloc == curr);
}
CAMLprim value caml_ml_runtime_warnings_enabled(value unit)
{
  CAMLassert (unit == Val_unit);
  return Val_bool(caml_runtime_warnings);
}