Example No. 1
static void verify_swept (struct caml_heap_state* local) {
  int i;
  struct mem_stats pool_stats = {}, large_stats = {};
  /* sweeping should be done by this point */
  Assert(local->next_to_sweep == NUM_SIZECLASSES);
  for (i = 0; i < NUM_SIZECLASSES; i++) {
    Assert(local->unswept_avail_pools[i] == 0 &&
           local->unswept_full_pools[i] == 0);
    pool* p;
    for (p = local->avail_pools[i]; p; p = p->next)
      verify_pool(p, i, &pool_stats);
    for (p = local->full_pools[i]; p; p = p->next) {
      Assert(p->next_obj == 0);
      verify_pool(p, i, &pool_stats);
    }
  }
  caml_gc_log("Pooled memory: %lu alloced, %lu free, %lu fragmentation",
              pool_stats.alloced, pool_stats.free, pool_stats.overhead);

  verify_large(local->swept_large, &large_stats);
  Assert(local->unswept_large == 0);
  caml_gc_log("Large memory: %lu alloced, %lu free, %lu fragmentation",
              large_stats.alloced, large_stats.free, large_stats.overhead);

  /* Check stats are being computed correctly */
  Assert(local->stats.pool_words == pool_stats.alloced);
  Assert(local->stats.pool_live_words == pool_stats.live);
  Assert(local->stats.pool_live_blocks == pool_stats.live_blocks);
  Assert(local->stats.pool_frag_words == pool_stats.overhead);
  Assert(local->stats.pool_words -
         (local->stats.pool_live_words + local->stats.pool_frag_words)
         == pool_stats.free);
  Assert(local->stats.large_words == large_stats.alloced);
  Assert(local->stats.large_blocks == large_stats.live_blocks);
}
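
The mem_stats accumulator filled in by verify_pool and verify_large is not
shown in these examples. Judging only from the fields verify_swept reads, a
minimal sketch could look like this (an assumption for illustration, not the
runtime's actual definition):

/* Sketch only: per-heap accounting with the fields verify_swept uses.
   The real struct is defined elsewhere in the runtime. */
struct mem_stats {
  uintnat alloced;      /* total words allocated (pooled or large) */
  uintnat live;         /* words occupied by live blocks */
  uintnat live_blocks;  /* number of live blocks */
  uintnat free;         /* words sitting on free lists */
  uintnat overhead;     /* fragmentation: words lost to headers/rounding */
};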
Example No. 2
void caml_cycle_heap(struct caml_heap_state* local) {
  int i, received_p = 0, received_l = 0;
  caml_gc_log("Cycling heap [%02d]", local->owner->state->id);
  for (i = 0; i < NUM_SIZECLASSES; i++) {
    local->unswept_avail_pools[i] = local->avail_pools[i];
    local->avail_pools[i] = 0;
    local->unswept_full_pools[i] = local->full_pools[i];
    local->full_pools[i] = 0;
  }
  local->unswept_large = local->swept_large;
  local->swept_large = 0;

  caml_plat_lock(&pool_freelist.lock);
  for (i = 0; i < NUM_SIZECLASSES; i++) {
    received_p += move_all_pools(&pool_freelist.global_avail_pools[i],
                                 &local->unswept_avail_pools[i],
                                 local->owner);
    received_p += move_all_pools(&pool_freelist.global_full_pools[i],
                                 &local->unswept_full_pools[i],
                                 local->owner);
  }
  while (pool_freelist.global_large) {
    large_alloc* a = pool_freelist.global_large;
    pool_freelist.global_large = a->next;
    a->owner = local->owner;
    a->next = local->unswept_large;
    local->unswept_large = a;
    received_l++;
  }
  caml_plat_unlock(&pool_freelist.lock);
  if (received_p || received_l)
    caml_gc_log("Received %d new pools, %d new large allocs", received_p, received_l);

  local->next_to_sweep = 0;
}
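
move_all_pools itself is not among these examples. From its call sites here
and in caml_teardown_shared_heap below, it splices an entire singly-linked
pool list onto another list and reports how many pools were moved. A
plausible sketch, with field names inferred from the surrounding code rather
than copied from the runtime:

/* Sketch only: move every pool from *src onto *dst, retagging its owner
   (NULL when pools are handed back to the global freelist), and return
   the number of pools moved. */
static int move_all_pools(pool** src, pool** dst, struct domain* new_owner)
{
  int moved = 0;
  while (*src) {
    pool* p = *src;
    *src = p->next;         /* unlink from the source list */
    p->owner = new_owner;   /* assumed owner field, as for large_alloc */
    p->next = *dst;         /* push onto the destination list */
    *dst = p;
    moved++;
  }
  return moved;
}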
Example No. 3
void caml_realloc_ref_table (struct caml_ref_table *tbl)
{                                           Assert (tbl->ptr == tbl->limit);
                                            Assert (tbl->limit <= tbl->end);
                                      Assert (tbl->limit >= tbl->threshold);

  if (tbl->base == NULL){
    caml_alloc_table (tbl, caml_minor_heap_size / sizeof (value) / 8, 256);
  }else if (tbl->limit == tbl->threshold){
    caml_gc_log ("ref_table threshold crossed");
    tbl->limit = tbl->end;
    caml_urge_major_slice ();
  }else{ /* This will almost never happen with the bytecode interpreter. */
    asize_t sz;
    asize_t cur_ptr = tbl->ptr - tbl->base;

    tbl->size *= 2;
    sz = (tbl->size + tbl->reserve) * sizeof (struct caml_ref_entry);
    caml_gc_log ("Growing ref_table to %"
                 ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
                     (intnat) sz/1024);
    tbl->base = (struct caml_ref_entry*) caml_stat_resize ((char *) tbl->base, sz);
    if (tbl->base == NULL){
      caml_fatal_error ("Fatal error: ref_table overflow\n");
    }
    tbl->end = tbl->base + tbl->size + tbl->reserve;
    tbl->threshold = tbl->base + tbl->size;
    tbl->ptr = tbl->base + cur_ptr;
    tbl->limit = tbl->end;
  }
}
Example No. 4
int main(int argc, char **argv)
{
#ifdef DEBUG
  caml_gc_log ("### OCaml runtime: debug mode ###");
#if 0
  {
    int i;
    char *ocp;
    char *cp;

    ocp = getenv ("OCAMLRUNPARAM");
    caml_gc_log ("### OCAMLRUNPARAM=%s", ocp == NULL ? "" : ocp);
    cp = getenv ("CAMLRUNPARAM");
    caml_gc_log ("### CAMLRUNPARAM=%s", cp == NULL ? "" : cp);
    caml_gc_log ("### working dir: %s", getcwd (NULL, 0));
  }
#endif
#endif
#ifdef _WIN32
  /* Expand wildcards and diversions in command line */
  caml_expand_command_line(&argc, &argv);
#endif
  caml_main(argv);
  caml_sys_exit(Val_int(0));
  return 0; /* not reached */
}
Example No. 5
void caml_teardown_shared_heap(struct caml_heap_state* heap) {
  int i;
  int released = 0, released_large = 0;
  caml_plat_lock(&pool_freelist.lock);
  heap->num_free_pools -=
    move_all_pools(&heap->free_pools, &pool_freelist.free, NULL);
  Assert(heap->num_free_pools == 0);
  for (i = 0; i < NUM_SIZECLASSES; i++) {
    released +=
      move_all_pools(&heap->avail_pools[i], &pool_freelist.global_avail_pools[i], NULL);
    released +=
      move_all_pools(&heap->full_pools[i], &pool_freelist.global_full_pools[i], NULL);
    /* should be swept by now */
    Assert(!heap->unswept_avail_pools[i]);
    Assert(!heap->unswept_full_pools[i]);
  }
  Assert(!heap->unswept_large);
  while (heap->swept_large) {
    large_alloc* a = heap->swept_large;
    heap->swept_large = a->next;
    a->next = pool_freelist.global_large;
    pool_freelist.global_large = a;
    released_large++;
  }
  /* FIXME: do something with stats */
  caml_plat_unlock(&pool_freelist.lock);
  caml_stat_free(heap);
  caml_gc_log("Shutdown shared heap. Released %d active pools, %d large",
              released, released_large);
}
Example No. 6
static void domain_terminate() {
  caml_gc_log("Domain terminating");
  caml_enter_blocking_section();

  /* FIXME: proper domain termination and reuse */
  /* interrupt_word_address must not go away */
  pause();
}
Example No. 7
void print_trace (void)
{
  void *array[10];
  size_t size;
  char **strings;
  size_t i;

  size = backtrace (array, 10);
  strings = backtrace_symbols (array, size);

  caml_gc_log ("Obtained %zu stack frames.", size);

  for (i = 0; i < size; i++)
    caml_gc_log ("%s", strings[i]);

  free (strings);
}
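
backtrace and backtrace_symbols come from glibc's <execinfo.h>;
backtrace_symbols returns a malloc'd array that the caller must free, exactly
as print_trace does. A standalone sketch of the same pattern outside the
runtime (report is a name chosen for illustration):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

/* Capture and print the current call stack, mirroring print_trace above. */
static void report (void)
{
  void *frames[10];
  int n = backtrace (frames, 10);              /* up to 10 return addresses */
  char **names = backtrace_symbols (frames, n);
  if (names == NULL) return;                   /* out of memory */
  for (int i = 0; i < n; i++)
    fprintf (stderr, "%s\n", names[i]);
  free (names);                /* free the array, not the individual strings */
}

int main (void) { report (); return 0; }

Linking with -rdynamic usually makes the printed symbol names readable.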
Example No. 8
void caml_empty_mark_stack () {
  value v;

  while (mark_stack_pop(&v)) mark(v, 10000000);

  if (stat_blocks_marked)
    caml_gc_log("Finished marking major heap. Marked %u blocks", (unsigned)stat_blocks_marked);
  stat_blocks_marked = 0;
}
Example No. 9
CAMLprim value caml_gc_major(value v)
{                                                    Assert (v == Val_unit);
  caml_gc_log ("Major GC cycle requested");
  caml_ev_pause(EV_PAUSE_GC);
  caml_empty_minor_heap ();
  caml_finish_major_cycle();
  caml_final_do_calls ();
  caml_ev_resume();
  return Val_unit;
}
Example No. 10
void caml_realloc_stack () {
  CAMLparam0();
  CAMLlocal2(old_stack, new_stack);
  /* All sizes are in bytes */
  asize_t size;
  uintnat stack_used;

  old_stack = caml_current_stack;
  stack_used = Stack_sp(old_stack);
  size = Wosize_val(old_stack);
  size *= 2;

  caml_gc_log ("Growing old_stack=0x%lx to %lu words\n", old_stack, size);
  new_stack = caml_alloc(size, Stack_tag);
  caml_gc_log ("New_stack=0x%lx\n", new_stack);

  memcpy(Stack_high(new_stack) - stack_used,
         Stack_high(old_stack) - stack_used,
         stack_used);

  Stack_sp(new_stack) = Stack_sp(old_stack);
  Stack_handle_value(new_stack) = Stack_handle_value(old_stack);
  Stack_handle_exception(new_stack) = Stack_handle_exception(old_stack);
  Stack_handle_effect(new_stack) = Stack_handle_effect(old_stack);
  Stack_parent(new_stack) = Stack_parent(old_stack);
  Stack_dirty(new_stack) = Val_long(0);

  if (Stack_dirty(old_stack) == Val_long(1)) {
    dirty_stack(new_stack);
  }

  load_stack(new_stack);

  /* Reset old stack */
  Stack_sp(old_stack) = 0;
  Stack_dirty(old_stack) = Val_long(0);
  Stack_handle_value(old_stack) = Val_long(0);
  Stack_handle_exception(old_stack) = Val_long(0);
  Stack_handle_effect(old_stack) = Val_long(0);
  Stack_parent(old_stack) = Val_unit;

  CAMLreturn0;
}
Example No. 11
static struct pool* find_pool_to_rescan()
{
  struct pool* p;
  caml_plat_lock(&pools_to_rescan_lock);
  if (pools_to_rescan_count > 0) {
    p = pools_to_rescan[--pools_to_rescan_count];
    caml_gc_log("Redarkening pool %p (%d others left)", p, pools_to_rescan_count);
  } else {
    p = 0;
  }
  caml_plat_unlock(&pools_to_rescan_lock);
  return p;
}
Example No. 12
unsigned caml_plat_spin_wait(unsigned spins,
                             const char* file, int line,
                             const char* function)
{
  if (spins < Min_sleep_ns) spins = Min_sleep_ns;
  if (spins > Max_sleep_ns) spins = Max_sleep_ns;
  unsigned next_spins = spins + spins / 4;
  if (spins < Slow_sleep_ns && Slow_sleep_ns <= next_spins) {
    caml_gc_log("Slow spin-wait loop in %s at %s:%d", function, file, line);
  }
  usleep(spins/1000);
  return next_spins;
}
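
A plausible way to use this helper is an exponential-backoff loop: the
argument is clamped to [Min_sleep_ns, Max_sleep_ns], the call sleeps for
roughly that long, and it returns a value 25% larger for the next attempt.
The sketch below is illustrative only; try_acquire and the starting value of
zero are assumptions, not part of the runtime.

/* Sketch: back off while waiting for some condition to become true. */
static void wait_until_ready (void)
{
  unsigned spins = 0;     /* the first call clamps this up to Min_sleep_ns */
  while (!try_acquire ()) /* hypothetical predicate */
    spins = caml_plat_spin_wait (spins, __FILE__, __LINE__, __func__);
}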
Example No. 13
void caml_cycle_heap(struct caml_heap_state* local) {
  int i;
  caml_gc_log("Cycling heap [%02d]", local->owner->id);
  for (i = 0; i < NUM_SIZECLASSES; i++) {
    local->unswept_avail_pools[i] = local->avail_pools[i];
    local->avail_pools[i] = 0;
    local->unswept_full_pools[i] = local->full_pools[i];
    local->full_pools[i] = 0;
  }
  local->next_to_sweep = 0;

  local->unswept_large = local->swept_large;
  local->swept_large = 0;
}
Example No. 14
CAMLprim value caml_gc_full_major(value v)
{
  int i;
  caml_gc_log ("Full Major GC requested");
  caml_ev_pause(EV_PAUSE_GC);
  /* In general, it can require up to 3 GC cycles for a
     currently-unreachable object to be collected. */
  for (i = 0; i < 3; i++) {
    caml_empty_minor_heap();
    caml_finish_major_cycle();
    caml_final_do_calls ();
  }
  caml_ev_resume();
  return Val_unit;
}
Example No. 15
/* Open the given shared library and add it to shared_libs.
   Abort on error. */
static void open_shared_lib(char * name)
{
  char * realname;
  void * handle;

  realname = caml_search_dll_in_path(&caml_shared_libs_path, name);
  caml_gc_log("Loading shared library %s", realname);
  caml_enter_blocking_section();
  handle = caml_dlopen(realname, 1, 1);
  caml_leave_blocking_section();
  if (handle == NULL)
    caml_fatal_error_arg2("Fatal error: cannot load shared library %s\n", name,
                          "Reason: %s\n", caml_dlerror());
  caml_ext_table_add(&shared_libs, handle);
  caml_stat_free(realname);
}
Example No. 16
static void verify_heap() {
  caml_save_stack_gc();

  caml_do_local_roots(&verify_push, caml_domain_self());
  caml_scan_global_roots(&verify_push);
  while (verify_sp) verify_object(verify_stack[--verify_sp]);
  caml_gc_log("Verify: %lu objs", verify_objs);

  caml_addrmap_clear(&verify_seen);
  verify_objs = 0;
  caml_stat_free(verify_stack);
  verify_stack = 0;
  verify_stack_len = 0;
  verify_sp = 0;
  caml_restore_stack_gc();
}
Example No. 17
CAMLprim value caml_dynlink_open_lib(value mode, value filename)
{
  void * handle;
  value result;
  char * p;

  caml_gc_log("Opening shared library %s", String_val(filename));
  p = caml_strdup(String_val(filename));
  caml_enter_blocking_section();
  handle = caml_dlopen(p, Int_val(mode), 1);
  caml_leave_blocking_section();
  caml_stat_free(p);

  if (handle == NULL) caml_failwith(caml_dlerror());
  result = caml_alloc_small(1, Abstract_tag);
  Handle_val(result) = handle;
  return result;
}
Example No. 18
void caml_init_gc ()
{
/*  uintnat major_heap_size =
      Bsize_wsize (caml_normalize_heap_increment (caml_params->heap_size_init)); */

  caml_max_stack_size = caml_params->init_max_stack_wsz;
  caml_fiber_wsz = caml_params->init_fiber_wsz;
  caml_percent_free = norm_pfree (caml_params->init_percent_free);
  caml_gc_log ("Initial stack limit: %luk bytes",
               caml_max_stack_size / 1024 * sizeof (value));
  caml_setup_eventlog();
  caml_gc_phase = Phase_sweep_and_mark_main;
  #ifdef NATIVE_CODE
  caml_init_frame_descriptors();
  #endif
  caml_init_domains(caml_params->init_minor_heap_wsz);
/*
  caml_major_heap_increment = major_incr;
  caml_percent_free = norm_pfree (percent_fr);
  caml_percent_max = norm_pmax (percent_m);
  caml_init_major_heap (major_heap_size);
  caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n",
                   Caml_state->minor_heap_size / 1024);
  caml_gc_message (0x20, "Initial major heap size: %luk bytes\n",
                   major_heap_size / 1024);
  caml_gc_message (0x20, "Initial space overhead: %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n", caml_percent_free);
  caml_gc_message (0x20, "Initial max overhead: %"
                   ARCH_INTNAT_PRINTF_FORMAT "u%%\n", caml_percent_max);
  if (caml_major_heap_increment > 1000){
    caml_gc_message (0x20, "Initial heap increment: %"
                     ARCH_INTNAT_PRINTF_FORMAT "uk words\n",
                     caml_major_heap_increment / 1024);
  }else{
    caml_gc_message (0x20, "Initial heap increment: %"
                     ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                     caml_major_heap_increment);
  }
  caml_gc_message (0x20, "Initial allocation policy: %d\n",
                   caml_allocation_policy);
*/
}
Example No. 19
intnat caml_major_collection_slice(intnat howmuch)
{
  intnat computed_work = howmuch ? howmuch : default_slice_budget();
  intnat budget = computed_work;
  intnat sweep_work, mark_work;
  uintnat blocks_marked_before = stat_blocks_marked;
  value v;

  caml_save_stack_gc();

  sweep_work = budget;
  budget = caml_sweep(caml_domain_self()->shared_heap, budget);
  sweep_work -= budget;

  if (gc_phase == Phase_idle) {
    caml_do_local_roots(&caml_darken, caml_domain_self());
    caml_scan_global_roots(&caml_darken);
    gc_phase = Phase_marking;
  }

  mark_work = budget;
  if (mark_stack_pop(&v))
    budget = mark(v, budget);
  mark_work -= budget;

  caml_gc_log("Major slice: %lu alloc, %ld work, %ld sweep, %ld mark (%lu blocks)",
              (unsigned long)caml_domain_state->allocated_words,
              (long)computed_work, (long)sweep_work, (long)mark_work,
              (unsigned long)(stat_blocks_marked - blocks_marked_before));
  caml_domain_state->allocated_words = 0;
  caml_restore_stack_gc();

  if (budget > 0) {
    caml_trigger_stw_gc();
    caml_handle_gc_interrupt();
  }


  return computed_work;
}
Example No. 20
static value promote_stack(struct domain* domain, value stack)
{
  caml_gc_log("Promoting stack");
  Assert(Tag_val(stack) == Stack_tag);
  if (Is_minor(stack)) {
    /* First, promote the actual stack object */
    Assert(caml_owner_of_young_block(stack) == domain);
    /* Stacks are only referenced via fibers, so we don't bother
       using the promotion_table */
    void* new_stack = caml_shared_try_alloc(domain->shared_heap, Wosize_val(stack), Stack_tag, 0);
    if (!new_stack) caml_fatal_error("allocation failure during stack promotion");
    memcpy(Op_hp(new_stack), (void*)stack, Wosize_val(stack) * sizeof(value));
    stack = Val_hp(new_stack);
  }

  /* Promote each object on the stack. */
  promote_domain = domain;
  caml_scan_stack(&promote_stack_elem, stack);
  /* Since we've promoted the objects on the stack, the stack is now clean. */
  caml_clean_stack_domain(stack, domain);
  return stack;
}
Example No. 21
CAMLprim value caml_realloc_global(value size)
{
  mlsize_t requested_size, actual_size, i;
  value old_global_data = caml_read_root(caml_global_data);
  value new_global_data;

  requested_size = Long_val(size);
  actual_size = Wosize_val(old_global_data);
  if (requested_size >= actual_size) {
    requested_size = (requested_size + 0x100) & 0xFFFFFF00;
    caml_gc_log ("Growing global data to %u entries",
                 (unsigned)requested_size);
    new_global_data = caml_alloc_shr(requested_size, 0);
    for (i = 0; i < actual_size; i++)
      caml_initialize_field(new_global_data, i, Field(old_global_data, i));
    for (i = actual_size; i < requested_size; i++){
      caml_initialize_field(new_global_data, i, Val_long(0));
    }
    caml_modify_root(caml_global_data, new_global_data);
  }
  return Val_unit;
}
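
The expression (requested_size + 0x100) & 0xFFFFFF00 rounds the request up
to a multiple of 256 entries and always yields strictly more than was asked
for. A small illustrative check of that arithmetic (round_up is a name
chosen for illustration):

/* Illustration only: the rounding rule used above.
     round_up(1000) == 1024
     round_up(1024) == 1280   (already aligned, still grows)
     round_up(255)  == 256                                      */
static mlsize_t round_up (mlsize_t requested)
{
  return (requested + 0x100) & 0xFFFFFF00;
}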
Example No. 22
value caml_alloc_stack (value hval, value hexn, value heff) {
  CAMLparam3(hval, hexn, heff);
  CAMLlocal1(stack);
  char* sp;
  struct caml_context *ctxt;

  stack = caml_alloc(caml_init_fiber_wsz, Stack_tag);
  Stack_dirty(stack) = Val_long(0);
  Stack_handle_value(stack) = hval;
  Stack_handle_exception(stack) = hexn;
  Stack_handle_effect(stack) = heff;
  Stack_parent(stack) = Val_unit;

  sp = Stack_high(stack);
  /* Fiber exception handler that returns to parent */
  sp -= sizeof(value);
  *(value**)sp = (value*)caml_fiber_exn_handler;
  /* No previous exception frame */
  sp -= sizeof(value);
  *(uintnat*)sp = 0;
  /* Value handler that returns to parent */
  sp -= sizeof(value);
  *(value**)sp = (value*)caml_fiber_val_handler;

  /* Build a context */
  sp -= sizeof(struct caml_context);
  ctxt = (struct caml_context*)sp;
  ctxt->exception_ptr_offset = 2 * sizeof(value);
  ctxt->gc_regs = NULL;
  Stack_sp(stack) = 3 * sizeof(value) + sizeof(struct caml_context);

  caml_gc_log ("Allocate stack=0x%lx of %lu words\n",
               stack, caml_init_fiber_wsz);

  CAMLreturn (stack);
}
Example No. 23
/* Same, then raise Out_of_memory */
static void compare_stack_overflow(void)
{
  caml_gc_log ("Stack overflow in structural comparison");
  compare_free_stack();
  caml_raise_out_of_memory();
}
Example No. 24
/* Make sure the minor heap is empty by performing a minor collection if
 * needed. */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes = caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  caml_save_stack_gc();

  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }

    for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }

    caml_oldify_mopup ();

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          vnew = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) { Assert(Tag_val(vnew) == Infix_tag); }
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }

    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion, unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, %u pointers rewritten",
                 (unsigned)stat_live_bytes/1024, (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);

  caml_restore_stack_gc();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
Example No. 25
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
Example No. 26
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all young-young pointers
       from older to younger objects */
    {
    struct addrmap young_young_ptrs = ADDRMAP_INIT;
    mlsize_t i;
    value iter;
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
    }
    for (iter = (value)young_ptr;
         iter < (value)young_end;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      if (hd != 0) {
        value curr = Val_hp(iter);
        tag_t tag = Tag_hd (hd);

        if (tag < No_scan_tag && tag != Cont_tag) {
          // FIXME: should scan Cont_tag
          for (i = 0; i < Wosize_hd(hd); i++) {
            value* f = Op_val(curr) + i;
            if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                *f < curr) {
              CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
            }
          }
        }
      }
    }
    caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
Example No. 27
static void mark_stack_prune ()
{
  struct addrmap t = ADDRMAP_INIT;
  int count = 0, entry;
  addrmap_iterator i;
  uintnat mark_stack_count = caml_domain_state->mark_stack_count;
  value* mark_stack = caml_domain_state->mark_stack;

  /* space used by the computations below */
  uintnat table_max = mark_stack_count / 100;
  if (table_max < 1000) table_max = 1000;

  /* amount of space we want to free up */
  int entries_to_free = (uintnat)(mark_stack_count * 0.20);

  /* We compress the mark stack by removing all of the objects from a
     subset of pools, which are rescanned later. For efficiency, we
     want to select those pools which occur most frequently, so that
     we need to rescan as few pools as possible. However, we do not
     have space to build a complete histogram.

     Using ~1% of the mark stack's space, we can find all of the
     elements that occur at least 100 times using the Misra-Gries
     heavy hitter algorithm (see J. Misra and D. Gries, "Finding
     repeated elements", 1982). */

  for (entry = 0; entry < mark_stack_count; entry++) {
    struct pool* pool = caml_pool_of_shared_block(mark_stack[entry]);
    if (!pool) continue;
    value p = (value)pool;
    if (caml_addrmap_contains(&t, p)) {
      /* if it's already present, increase the count */
      (*caml_addrmap_insert_pos(&t, p)) ++;
    } else if (count < table_max) {
      /* if there's space, insert it with count 1 */
      *caml_addrmap_insert_pos(&t, p) = 1;
      count++;
    } else {
      /* otherwise, decrease all entries by 1 */
      struct addrmap s = ADDRMAP_INIT;
      int scount = 0;
      for (i = caml_addrmap_iterator(&t);
           caml_addrmap_iter_ok(&t, i);
           i = caml_addrmap_next(&t, i)) {
        value k = caml_addrmap_iter_key(&t, i);
        value v = caml_addrmap_iter_value(&t, i);
        if (v > 1) {
          *caml_addrmap_insert_pos(&s, k) = v - 1;
          scount++;
        }
      }
      caml_addrmap_clear(&t);
      t = s;
      count = scount;
    }
  }

  /* t now contains all pools that occur at least 100 times.
     If no pools occur at least 100 times, t is some arbitrary subset of pools.
     Next, we get an accurate count of the occurrences of the pools in t */

  for (i = caml_addrmap_iterator(&t);
       caml_addrmap_iter_ok(&t, i);
       i = caml_addrmap_next(&t, i)) {
    *caml_addrmap_iter_val_pos(&t, i) = 0;
  }
  for (entry = 0; entry < mark_stack_count; entry++) {
    value p = (value)caml_pool_of_shared_block(mark_stack[entry]);
    if (p && caml_addrmap_contains(&t, p))
      (*caml_addrmap_insert_pos(&t, p))++;
  }

  /* Next, find a subset of those pools that covers enough entries */

  struct pool_count* pools = caml_stat_alloc(count * sizeof(struct pool_count));
  int pos = 0;
  for (i = caml_addrmap_iterator(&t);
       caml_addrmap_iter_ok(&t, i);
       i = caml_addrmap_next(&t, i)) {
    struct pool_count* p = &pools[pos++];
    p->pool = (struct pool*)caml_addrmap_iter_key(&t, i);
    p->occurs = (int)caml_addrmap_iter_value(&t, i);
  }
  Assert(pos == count);
  caml_addrmap_clear(&t);

  qsort(pools, count, sizeof(struct pool_count), &pool_count_cmp);

  int start = count, total = 0;
  while (start > 0 && total < entries_to_free) {
    start--;
    total += pools[start].occurs;
  }

  for (i = start; i < count; i++) {
    *caml_addrmap_insert_pos(&t, (value)pools[i].pool) = 1;
  }
  int out = 0;
  for (entry = 0; entry < mark_stack_count; entry++) {
    value v = mark_stack[entry];
    value p = (value)caml_pool_of_shared_block(v);
    if (!(p && caml_addrmap_contains(&t, p))) {
      mark_stack[out++] = v;
    }
  }
  caml_domain_state->mark_stack_count = out;

  caml_gc_log("Mark stack overflow. Postponing %d pools (%.1f%%, leaving %d).",
              count-start, 100. * (double)total / (double)mark_stack_count,
              (int)caml_domain_state->mark_stack_count);


  /* Add the pools to rescan to the global list.

     This must be done after the mark stack is filtered, since other
     threads race to remove pools from the global list. As soon as
     pools_to_rescan_lock is released, we cannot rely on pools being
     in the global list. */

  caml_plat_lock(&pools_to_rescan_lock);
  for (i = start; i < count; i++) {
    if (pools_to_rescan_count == pools_to_rescan_len) {
      pools_to_rescan_len = pools_to_rescan_len * 2 + 128;
      pools_to_rescan =
        caml_stat_resize(pools_to_rescan, pools_to_rescan_len * sizeof(struct pool*));
    }
    pools_to_rescan[pools_to_rescan_count++] = pools[i].pool;
  }
  caml_plat_unlock(&pools_to_rescan_lock);
}
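
The comment above refers to the Misra-Gries heavy-hitter algorithm: with K
counters, one pass guarantees that every value occurring more than n/(K+1)
times still has a positive (under-)count at the end. A minimal array-based
sketch of that idea, independent of the runtime's addrmap (K and the helper
name are chosen for illustration):

#include <string.h>

#define K 4   /* number of counters; finds values occurring > n/(K+1) times */

struct mg_entry { long key; long count; };

/* One pass of Misra-Gries over vals[0..n): if the value is already tracked,
   increment its counter; else if a counter is free, adopt the value with
   count 1; else decrement every counter. Counts are lower-bound estimates. */
static void misra_gries (const long *vals, int n, struct mg_entry out[K])
{
  int i, j;
  memset (out, 0, K * sizeof (struct mg_entry));
  for (i = 0; i < n; i++) {
    int free_slot = -1;
    for (j = 0; j < K; j++) {
      if (out[j].count > 0 && out[j].key == vals[i]) { out[j].count++; break; }
      if (out[j].count == 0) free_slot = j;
    }
    if (j < K) continue;              /* already tracked */
    if (free_slot >= 0) {             /* start tracking with count 1 */
      out[free_slot].key = vals[i];
      out[free_slot].count = 1;
    } else {                          /* no space: decrement everything */
      for (j = 0; j < K; j++) out[j].count--;
    }
  }
}

mark_stack_prune uses the same scheme with table_max counters, then makes a
second pass to turn the surviving estimates into exact occurrence counts.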