Example #1
/* Finish the work that was put off by [caml_oldify_one].
   Note that [caml_oldify_one] itself is called by [caml_oldify_mopup], so we
   have to be careful to remove the first entry from the list before
   oldifying its fields. */
static void caml_oldify_mopup (void)
{
  value v, new_v, f;
  mlsize_t i;

  while (oldify_todo_list != 0){
    v = oldify_todo_list;                 /* Get the head. */
    Assert (Hd_val (v) == 0);             /* It must be forwarded. */
    new_v = Op_val (v)[0];                /* Follow forward pointer. */
    if (Tag_val(new_v) == Stack_tag) {
      oldify_todo_list = Op_val (v)[1];   /* Remove from list (stack) */
      caml_scan_stack(caml_oldify_one, new_v);
    } else {
      oldify_todo_list = Op_val (new_v)[1]; /* Remove from list (non-stack) */

      f = Op_val (new_v)[0];
      if (Is_block (f) && Is_young (f)){
        caml_oldify_one (f, Op_val (new_v));
      }
      for (i = 1; i < Wosize_val (new_v); i++){
        f = Op_val (v)[i];
        if (Is_block (f) && Is_young (f)){
          caml_oldify_one (f, Op_val (new_v) + i);
        }else{
          Op_val (new_v)[i] = f;
        }
      }
    }
  }
}
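A minimal, self-contained sketch of the same idiom, with invented toy types (block, copy_one, mopup) rather than the runtime's: copying zeroes the old header, stores a forwarding pointer in field 0 of the old block, and temporarily uses field 1 of the copy as the todo-list link, so mopup must unlink the head before scanning, exactly as above. Two fields per block and NULL-for-immediate are simplifying assumptions.

#include <stdio.h>

#define NFIELDS 2
typedef struct block { long hd; struct block *field[NFIELDS]; } block;

static block *todo_list = NULL;

/* Copy [v] into to-space via the bump pointer [alloc], leaving hd == 0
   and a forwarding pointer in field 0 of the old block.  Field 1 of the
   copy temporarily threads the block onto the todo list. */
static block *copy_one(block *v, block **alloc) {
  if (v == NULL) return NULL;              /* immediates: nothing to copy */
  if (v->hd == 0) return v->field[0];      /* already forwarded */
  block *copy = (*alloc)++;
  copy->hd = v->hd;
  copy->field[0] = v->field[0];            /* save field 0 in the copy */
  copy->field[1] = todo_list;              /* link into the todo list */
  v->hd = 0;                               /* set forward flag */
  v->field[0] = copy;                      /* set forward pointer */
  todo_list = v;                           /* the list holds old blocks */
  return copy;
}

/* Like caml_oldify_mopup: pop the head *before* scanning, since scanning
   calls copy_one, which pushes onto the same list. */
static void mopup(block **alloc) {
  while (todo_list != NULL) {
    block *v = todo_list;                  /* old block; hd == 0 */
    block *copy = v->field[0];             /* follow forward pointer */
    todo_list = copy->field[1];            /* remove from list first */
    copy->field[0] = copy_one(copy->field[0], alloc);
    copy->field[1] = copy_one(v->field[1], alloc); /* field 1 is still in v */
  }
}

int main(void) {
  block from[3] = {{0}}, to[3] = {{0}};
  from[0].hd = from[1].hd = from[2].hd = 1;
  from[0].field[0] = &from[1];
  from[0].field[1] = &from[2];
  block *alloc = to;
  block *root = copy_one(&from[0], &alloc);
  mopup(&alloc);
  printf("root moved to index %ld; %ld blocks copied\n",
         (long)(root - to), (long)(alloc - to));   /* 0; 3 */
  return 0;
}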
Example #2
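/* Verify a single heap object: normalise Infix_tag pointers back to the
   enclosing closure, skip objects already seen or marked NOT_MARKABLE,
   check the mark status of major-heap objects, and queue every block
   child (or stack slot) for verification. */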
static void verify_object(value v) {
  if (!Is_block(v)) return;

  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&verify_seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  verify_objs++;

  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.MARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(f, 0);
    }
  }
}
Example #3
static void verify_object(struct heap_verify_state* st, value v) {
  if (!Is_block(v)) return;

  Assert (Hd_val(v));
  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&st->seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  st->objs++;

  // caml_gc_log ("verify_object: v=0x%lx hd=0x%lx tag=%u", v, Hd_val(v), Tag_val(v));
  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.UNMARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, st, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(st, f, 0);
    }
  }
}
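A standalone sketch of the seen-set walk that verify_object performs, with a naive array standing in for caml_addrmap and invented names (obj, insert_seen, and a work array playing the role of the verify_push queue). The cycle built in main shows why the seen set is what guarantees termination.

#include <stdio.h>

typedef struct obj { struct obj *child[2]; } obj;

static obj *seen[128]; static int nseen = 0;
static obj *work[128]; static int nwork = 0;
static int verified = 0;

static int insert_seen(obj *v) {            /* 1 if newly inserted */
  for (int i = 0; i < nseen; i++) if (seen[i] == v) return 0;
  seen[nseen++] = v;
  return 1;
}

static void verify_push(obj *v) { work[nwork++] = v; }

static void verify_object(obj *v) {
  if (v == NULL) return;                    /* immediates: nothing to check */
  if (!insert_seen(v)) return;              /* already verified */
  verified++;
  for (int i = 0; i < 2; i++)
    if (v->child[i]) verify_push(v->child[i]);
}

int main(void) {
  obj a = {{0, 0}}, b = {{0, 0}}, c = {{&a, &b}};
  a.child[0] = &c;                          /* a cycle: the seen set ends it */
  verify_push(&c);
  while (nwork > 0) verify_object(work[--nwork]);
  printf("verified %d objects\n", verified); /* prints 3 */
  return 0;
}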
Example #4
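/* Mark transitively from [initial], spending at most [budget] words of
   work; returns the unspent budget (possibly negative).  The most
   recently greyed child is kept in [next] so that long chains are
   followed without touching the mark stack. */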
static intnat mark(value initial, intnat budget) {
  value next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    value v = next;
    header_t hd_v;
    found_next = 0;

    Assert(Is_markable(v));
    Assert(v == mark_normalise(v));

    stat_blocks_marked++;
    /* mark the current object */
    hd_v = Hd_val(v);
    // caml_gc_log ("mark: v=0x%lx hd=0x%lx tag=%d sz=%lu",
    //             v, hd_v, Tag_val(v), Wosize_val(v));
    if (Tag_hd (hd_v) == Stack_tag) {
      // caml_gc_log ("mark: stack=%p", (value*)v);
      caml_scan_stack(&caml_darken, v);
    } else if (Tag_hd (hd_v) < No_scan_tag) {
      int i;
      for (i = 0; i < Wosize_hd(hd_v); i++) {
        value child = Op_val(v)[i];
        // caml_gc_log ("mark: v=%p i=%u child=%p",(value*)v,i,(value*)child);
        /* FIXME: this is wrong, as Debug_tag(N) is a valid value.
           However, it's a useful debugging aid for now */
        Assert(!Is_debug_tag(child));
        if (Is_markable(child)) {
          child = mark_normalise(child);
          if (caml_mark_object(child)) {
            if (!found_next) {
              next = child;
              found_next = 1;
            } else {
              mark_stack_push(child);
            }
          }
        }
      }
    }
    budget -= Whsize_hd(hd_v);

    /* if we haven't found any markable children, pop an object to mark */
    if (!found_next) {
      found_next = mark_stack_pop(&next);
    }
  }
  if (found_next) {
    mark_stack_push(next);
  }
  return budget;
}
Example #5
static intnat mark(value initial, intnat budget) {
  value next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    value v = next;
    header_t hd_v;
    found_next = 0;

    Assert(Is_markable(v));
    Assert(v == mark_normalise(v));

    stat_blocks_marked++;
    /* mark the current object */
    hd_v = Hd_val(v);
    if (Tag_hd (hd_v) == Stack_tag) {
      caml_scan_stack(&caml_darken, v);
    } else if (Tag_hd (hd_v) < No_scan_tag) {
      int i;
      for (i = 0; i < Wosize_hd(hd_v); i++) {
        value child = Field(v, i);
        if (Is_markable(child)) {
          child = mark_normalise(child);
          if (caml_mark_object(child)) {
            if (!found_next) {
              next = child;
              found_next = 1;
            } else {
              mark_stack_push(child);
            }
          }
        }
      }
    }
    budget -= Whsize_hd(hd_v);

    /* if we haven't found any markable children, pop an object to mark */
    if (!found_next) {
      found_next = mark_stack_pop(&next);
    }
  }
  if (found_next) {
    mark_stack_push(next);
  }
  return budget;
}
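A self-contained sketch of the budgeted mark loop's control flow, using invented toy types (node, mark_object): the first newly-marked child becomes next, so chains are followed without touching the mark stack; further children are pushed; and when the budget runs out, the object in hand is saved for the next slice. As in the runtime, the initial object is assumed to have been marked already by the caller.

#include <stdio.h>

enum { WHITE, BLACK };
typedef struct node { int colour; struct node *child[2]; } node;

static node *stack[64];
static int sp = 0;

static int mark_object(node *n) {            /* 1 on a WHITE -> BLACK step */
  if (n == NULL || n->colour == BLACK) return 0;
  n->colour = BLACK;
  return 1;
}

static long mark(node *initial, long budget) {
  node *next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    node *v = next;
    found_next = 0;
    for (int i = 0; i < 2; i++) {
      node *child = v->child[i];
      if (mark_object(child)) {
        if (!found_next) { next = child; found_next = 1; }
        else stack[sp++] = child;            /* overflow unchecked: a sketch */
      }
    }
    budget -= 1;                             /* one unit of work per node */
    if (!found_next && sp > 0) { next = stack[--sp]; found_next = 1; }
  }
  if (found_next) stack[sp++] = next;        /* out of budget: save the work */
  return budget;
}

int main(void) {
  node n[4] = {{WHITE, {0, 0}}};
  n[0].child[0] = &n[1]; n[0].child[1] = &n[2]; n[1].child[0] = &n[3];
  mark_object(&n[0]);                        /* the caller darkens the root */
  long left = mark(&n[0], 10);
  printf("budget left: %ld, n[3] marked: %d\n", left, n[3].colour == BLACK);
  return 0;
}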
Example #6
static value promote_stack(struct domain* domain, value stack)
{
  caml_gc_log("Promoting stack");
  Assert(Tag_val(stack) == Stack_tag);
  if (Is_minor(stack)) {
    /* First, promote the actual stack object */
    Assert(caml_owner_of_young_block(stack) == domain);
    /* Stacks are only referenced via fibers, so we don't bother
       using the promotion_table */
    void* new_stack = caml_shared_try_alloc(domain->shared_heap, Wosize_val(stack), Stack_tag, 0);
    if (!new_stack) caml_fatal_error("allocation failure during stack promotion");
    memcpy(Op_hp(new_stack), (void*)stack, Wosize_val(stack) * sizeof(value));
    stack = Val_hp(new_stack);
  }

  /* Promote each object on the stack. */
  promote_domain = domain;
  caml_scan_stack(&promote_stack_elem, stack);
  /* Since we've promoted the objects on the stack, the stack is now clean. */
  caml_clean_stack_domain(stack, domain);
  return stack;
}
Example #7
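/* Empty the minor heap of [domain]: promote every live minor object to
   the shared heap, update the remembered sets and ephemeron keys to the
   promoted copies, run finalisers for dead custom blocks, and reset the
   young pointer. */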
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-to-young pointers that go from older to younger objects. */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);

          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                  *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
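The major_ref rewrite above relies on a compare-and-swap so that a racing mutator which has already overwritten the slot wins, which is why rewrite_failures is tolerated when other domains are running. A minimal standalone C11 sketch of that idiom, with invented names (rewrite_slot):

#include <stdatomic.h>
#include <stdio.h>

typedef long value;

static void rewrite_slot(_Atomic value *slot, value old_v, value new_v,
                         unsigned *successes, unsigned *failures) {
  value expected = old_v;
  /* Succeeds only if *slot still holds old_v; otherwise a concurrent
     writer got there first and its value is kept. */
  if (atomic_compare_exchange_strong(slot, &expected, new_v))
    ++*successes;
  else
    ++*failures;
}

int main(void) {
  _Atomic value slot = 42;
  unsigned ok = 0, fail = 0;
  rewrite_slot(&slot, 42, 100, &ok, &fail);  /* succeeds: slot was 42 */
  rewrite_slot(&slot, 42, 200, &ok, &fail);  /* fails: slot is now 100 */
  printf("slot=%ld successes=%u failures=%u\n", (long)slot, ok, fail);
  return 0;
}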
Example #8
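/* Promote [root] (and everything reachable from it) from [domain]'s
   minor heap to the shared heap, then fix up remaining minor-heap
   references to the promoted objects.  If too large a fraction of the
   minor heap would need scanning for fix-ups, fall back to a full minor
   collection instead. */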
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
Example #9
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;    /* Add this block */
      st->todo_list = v;                     /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;             /* Set (GC) forward flag */
      Op_val (v)[0] = result;      /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;                        /* Follow the forwarding */
      goto tail_call;               /*  then oldify. */
    }
  }
}
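The Forward_tag branch above chooses between short-circuiting the forwarded value and copying the block unchanged, depending on the target's tag. A standalone sketch of just that predicate, with invented tag constants mirroring the ft == Forward_tag || ft == Lazy_tag || ft == Double_tag test:

#include <stdio.h>

enum tag { PLAIN_TAG, FORWARD_TAG, LAZY_TAG, DOUBLE_TAG };

/* Short-circuiting is safe only when the target could not itself be
   confused with a lazy value or a boxed float by readers of the field. */
static int can_short_circuit(enum tag target_tag) {
  return target_tag != FORWARD_TAG &&
         target_tag != LAZY_TAG &&
         target_tag != DOUBLE_TAG;
}

int main(void) {
  printf("%d %d\n",
         can_short_circuit(PLAIN_TAG),     /* 1: follow the forwarding */
         can_short_circuit(LAZY_TAG));     /* 0: copy as a Forward_tag block */
  return 0;
}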