Example #1
static void verify_object(struct heap_verify_state* st, value v) {
  if (!Is_block(v)) return;

  Assert (Hd_val(v));
  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&st->seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  st->objs++;

  // caml_gc_log ("verify_object: v=0x%lx hd=0x%lx tag=%u", v, Hd_val(v), Tag_val(v));
  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.UNMARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, st, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(st, f, 0);
    }
  }
}
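
The two verify_object variants above and below visit each block exactly once by asking the address map for the insertion slot of the value and marking it in place. A minimal, self-contained sketch of that "insert position" dedup pattern, using a toy open-addressing table (the names, sizes, and hash constant are illustrative stand-ins, not the runtime's caml_addrmap):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy open-addressing "seen" set standing in for caml_addrmap_insert_pos:
   it hands back a pointer to the slot for a key, so the caller can test
   "already visited?" and mark the object in a single lookup. */
#define NSLOTS 1024              /* power of two; assumed large enough here */
#define NOT_PRESENT 0            /* stand-in for ADDRMAP_NOT_PRESENT */

static uintptr_t keys[NSLOTS];   /* zero-initialised: 0 means empty slot */
static intptr_t vals[NSLOTS];

static intptr_t* insert_pos(uintptr_t key) {
  size_t i = (key * 2654435761u) & (NSLOTS - 1);
  while (keys[i] != 0 && keys[i] != key)
    i = (i + 1) & (NSLOTS - 1);  /* linear probing */
  keys[i] = key;
  return &vals[i];
}

int main(void) {
  uintptr_t obj = 0x5040;        /* pretend this is a block address */
  intptr_t* entry = insert_pos(obj);
  assert(*entry == NOT_PRESENT); /* first visit */
  *entry = 1;                    /* mark as seen */
  assert(*insert_pos(obj) == 1); /* a second visit would return early */
  printf("dedup ok\n");
  return 0;
}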
Example #2
static void verify_object(value v) {
  if (!Is_block(v)) return;

  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&verify_seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  verify_objs++;

  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.MARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(f, 0);
    }
  }
}
Example #3
struct pool* caml_pool_of_shared_block(value v)
{
  Assert (Is_block(v) && !Is_minor(v));
  mlsize_t whsize = Whsize_wosize(Wosize_val(v));
  if (whsize > 0 && whsize <= SIZECLASS_MAX) {
    return (pool*)((uintnat)v &~(POOL_WSIZE * sizeof(value) - 1));
  } else {
    return 0;
  }
}
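
Because each pool is placed at an address aligned to its own size, rounding any pointer into the pool down to that alignment recovers the pool header; that is what the mask in caml_pool_of_shared_block computes. A standalone sketch of the same arithmetic (POOL_WSIZE, the pool struct, and the addresses below are assumptions for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WORD_SIZE  sizeof(uintptr_t)
#define POOL_WSIZE 4096                       /* pool size in words (assumed) */
#define POOL_BSIZE (POOL_WSIZE * WORD_SIZE)   /* pool size in bytes */

typedef struct pool { int owner; } pool;      /* illustrative header only */

/* Round an interior pointer down to the start of its size-aligned pool. */
static pool* pool_of_ptr(void* p) {
  return (pool*)((uintptr_t)p & ~(uintptr_t)(POOL_BSIZE - 1));
}

int main(void) {
  uintptr_t pool_base = 8 * (uintptr_t)POOL_BSIZE;    /* some aligned base */
  void* obj = (void*)(pool_base + 100 * WORD_SIZE);   /* 100 words inside */
  assert((uintptr_t)pool_of_ptr(obj) == pool_base);
  printf("pool base recovered: %p\n", (void*)pool_of_ptr(obj));
  return 0;
}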
Example #4
static value caml_promote_one(struct promotion_stack* stk, struct domain* domain, value curr)
{
  header_t curr_block_hd;
  int infix_offset = 0;
  if (Is_long(curr) || !Is_minor(curr))
    return curr; /* needs no promotion */

  Assert(caml_owner_of_young_block(curr) == domain);

  curr_block_hd = Hd_val(curr);

  if (Tag_hd(curr_block_hd) == Infix_tag) {
    infix_offset = Infix_offset_val(curr);
    curr -= infix_offset;
    curr_block_hd = Hd_val(curr);
  }

  if (Is_promoted_hd(curr_block_hd)) {
    /* already promoted */
    return caml_addrmap_lookup(&domain->state->remembered_set->promotion, curr) + infix_offset;
  } else if (curr_block_hd == 0) {
    /* promoted by minor GC */
    return Op_val(curr)[0] + infix_offset;
  }

  /* otherwise, must promote */
  void* mem = caml_shared_try_alloc(domain->shared_heap, Wosize_hd(curr_block_hd),
                                           Tag_hd(curr_block_hd), 1);
  if (!mem) caml_fatal_error("allocation failure during promotion");
  value promoted = Val_hp(mem);
  Hd_val(curr) = Promotedhd_hd(curr_block_hd);

  caml_addrmap_insert(&domain->state->remembered_set->promotion, curr, promoted);
  caml_addrmap_insert(&domain->state->remembered_set->promotion_rev, promoted, curr);

  if (Tag_hd(curr_block_hd) >= No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_hd(curr_block_hd); i++)
      Op_val(promoted)[i] = Op_val(curr)[i];
  } else {
    /* push to stack */
    if (stk->sp == stk->stack_len) {
      stk->stack_len = 2 * (stk->stack_len + 10);
      stk->stack = caml_stat_resize(stk->stack,
          sizeof(struct promotion_stack_entry) * stk->stack_len);
    }
    stk->stack[stk->sp].local = curr;
    stk->stack[stk->sp].global = promoted;
    stk->stack[stk->sp].field = 0;
    stk->sp++;
  }
  return promoted + infix_offset;
}
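
The promotion stack above is an explicit, growable worklist, so large object graphs are promoted iteratively rather than by recursion; caml_promote (Example #8 below) drains it one field at a time. A minimal sketch of the same grow-on-demand policy, keeping only the 2 * (len + 10) schedule from the snippet (everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative entry; the runtime's entries carry local/global/field. */
struct entry { long local, global; int field; };

struct pstack { struct entry* items; int sp, len; };

/* Grow-on-demand push, mirroring the 2 * (len + 10) schedule above. */
static void push(struct pstack* stk, struct entry e) {
  if (stk->sp == stk->len) {
    stk->len = 2 * (stk->len + 10);
    stk->items = realloc(stk->items, sizeof(struct entry) * stk->len);
    if (!stk->items) { perror("realloc"); exit(1); }
  }
  stk->items[stk->sp++] = e;
}

int main(void) {
  struct pstack stk = {0};
  for (int i = 0; i < 1000; i++)
    push(&stk, (struct entry){ i, i, 0 });
  printf("pushed %d entries, capacity %d\n", stk.sp, stk.len);
  free(stk.items);
  return 0;
}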
Example #5
static void scan_global_roots(scanning_action f)
{
  value r, newr;
  caml_plat_lock(&roots_mutex);
  r = roots_all;
  caml_plat_unlock(&roots_mutex);
  
  Assert(!Is_minor(r));
  newr = r;
  f(newr, &newr);
  Assert(r == newr); /* GC should not move r, it is not young */
}
Example #6
struct domain* caml_owner_of_shared_block(value v) {
  Assert (Is_block(v) && !Is_minor(v));
  mlsize_t whsize = Whsize_wosize(Wosize_val(v));
  Assert (whsize > 0); /* not an atom */
  if (whsize <= SIZECLASS_MAX) {
    /* FIXME: ORD: if we see the object, we must see the owner */
    pool* p = (pool*)((uintnat)v &~(POOL_WSIZE * sizeof(value) - 1));
    return p->owner;
  } else {
    large_alloc* a = (large_alloc*)(Hp_val(v) - LARGE_ALLOC_HEADER_SZ);
    return a->owner;
  }
}
Example #7
CAMLexport const value* caml_named_value(char const *name)
{
  struct named_value * nv;
  caml_root ret = NULL;
  caml_plat_lock(&named_value_lock);
  for (nv = named_value_table[hash_value_name(name)];
       nv != NULL;
       nv = nv->next) {
    if (strcmp(name, nv->name) == 0){
      ret = nv->val;
      break;
    }
  }
  caml_plat_unlock(&named_value_lock);
  /* *ret should never be a minor object, since caml_create_root promotes */
  CAMLassert (!(ret && Is_minor(caml_read_root(ret))));
  return Op_val(ret);
}
Example #8
CAMLexport value caml_promote(struct domain* domain, value root)
{
  struct promotion_stack stk = {0};

  if (Is_long(root))
    /* Integers are already shared */
    return root;

  if (Tag_val(root) == Stack_tag)
    /* Stacks are handled specially */
    return promote_stack(domain, root);

  if (!Is_minor(root))
    /* This value is already shared */
    return root;

  Assert(caml_owner_of_young_block(root) == domain);

  value ret = caml_promote_one(&stk, domain, root);

  while (stk.sp > 0) {
    struct promotion_stack_entry* curr = &stk.stack[stk.sp - 1];
    value local = curr->local;
    value global = curr->global;
    int field = curr->field;
    Assert(field < Wosize_val(local));
    curr->field++;
    if (curr->field == Wosize_val(local))
      stk.sp--;
    value x = Op_val(local)[field];
    if (Is_block(x) && Tag_val(x) == Stack_tag) {
      /* stacks are not promoted unless explicitly requested */
      Ref_table_add(&domain->state->remembered_set->ref, global, field);
    } else {
      x = caml_promote_one(&stk, domain, x);
    }
    Op_val(local)[field] = Op_val(global)[field] = x;
  }
  caml_stat_free(stk.stack);
  return ret;
}
Example #9
static value promote_stack(struct domain* domain, value stack)
{
  caml_gc_log("Promoting stack");
  Assert(Tag_val(stack) == Stack_tag);
  if (Is_minor(stack)) {
    /* First, promote the actual stack object */
    Assert(caml_owner_of_young_block(stack) == domain);
    /* Stacks are only referenced via fibers, so we don't bother
       using the promotion_table */
    void* new_stack = caml_shared_try_alloc(domain->shared_heap, Wosize_val(stack), Stack_tag, 0);
    if (!new_stack) caml_fatal_error("allocation failure during stack promotion");
    memcpy(Op_hp(new_stack), (void*)stack, Wosize_val(stack) * sizeof(value));
    stack = Val_hp(new_stack);
  }

  /* Promote each object on the stack. */
  promote_domain = domain;
  caml_scan_stack(&promote_stack_elem, stack);
  /* Since we've promoted the objects on the stack, the stack is now clean. */
  caml_clean_stack_domain(stack, domain);
  return stack;
}
Example #10
void forward_pointer (void* state, value v, value *p) {
  header_t hd;
  mlsize_t offset;
  value fwd;
  struct domain* promote_domain = state;
  caml_domain_state* domain_state =
    promote_domain ? promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;

  if (Is_block (v) && is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
    hd = Hd_val(v);
    if (hd == 0) {
      *p = Op_val(v)[0];
      CAMLassert (Is_block(*p) && !Is_minor(*p));
    } else if (Tag_hd(hd) == Infix_tag) {
      offset = Infix_offset_hd(hd);
      fwd = 0;
      forward_pointer (state, v - offset, &fwd);
      if (fwd) *p = fwd + offset;
    }
  }
}
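
forward_pointer relies on the copying convention used throughout these snippets: once a minor block has been copied out, its header is cleared to 0 and its first field holds the new location; infix pointers are resolved by first forwarding the enclosing closure and re-adding the offset. A toy model of the header-0 convention (the struct and values are illustrative; infix handling is omitted):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy block: header word plus a first field usable as a forwarding pointer. */
struct block { size_t header; void* field0; };

static void* forward(struct block* b) {
  if (b->header == 0)   /* copied: field0 is the forwarding pointer */
    return b->field0;
  return b;             /* not copied: the pointer is left unchanged */
}

int main(void) {
  struct block promoted_copy = { 42, NULL };
  struct block original = { 0, &promoted_copy };  /* header cleared after copy */
  assert(forward(&original) == &promoted_copy);
  assert(forward(&promoted_copy) == &promoted_copy);
  printf("forwarding resolved\n");
  return 0;
}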
Example #11
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all young-young pointers
       from older to younger objects */
    {
    struct addrmap young_young_ptrs = ADDRMAP_INIT;
    mlsize_t i;
    value iter;
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
    }
    for (iter = (value)young_ptr;
         iter < (value)young_end;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      if (hd != 0) {
        value curr = Val_hp(iter);
        tag_t tag = Tag_hd (hd);

        if (tag < No_scan_tag && tag != Cont_tag) {
          // FIXME: should scan Cont_tag
          for (i = 0; i < Wosize_hd(hd); i++) {
            value* f = Op_val(curr) + i;
            if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                *f < curr) {
              CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
            }
          }
        }
      }
    }
    caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
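
When other domains may be running, the update of major-to-minor references above does not blindly store the forwarded value: it uses a compare-and-swap so a concurrent mutator's write to the same field is never overwritten (hence the rewrite_successes/rewrite_failures counters). A minimal standalone illustration of that pattern with C11 atomics (names and values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
  _Atomic long slot = 100;    /* a field another domain might also write */
  long expected = 100;        /* the old value we read before the rewrite */
  long forwarded = 200;       /* the new location of the promoted object */

  /* Succeeds only if the slot still holds the value we expected. */
  if (atomic_compare_exchange_strong(&slot, &expected, forwarded))
    printf("rewrite succeeded: slot = %ld\n", (long)slot);
  else
    printf("rewrite lost a race: slot now holds %ld\n", expected);
  return 0;
}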
Example #12
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
Example #13
void caml_shared_unpin(value v) {
  Assert (Is_block(v) && !Is_minor(v));
  Assert (caml_owner_of_shared_block(v) == caml_domain_self());
  Assert (Has_status_hd(Hd_val(v), NOT_MARKABLE));
  Hd_val(v) = With_status_hd(Hd_val(v), global.UNMARKED);
}
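
caml_shared_unpin rewrites the status field of the header from NOT_MARKABLE back to UNMARKED, making the block subject to marking and sweeping again. A standalone sketch of a masked status field inside a header word (the bit positions and widths here are assumptions for illustration, not the runtime's actual layout):

#include <assert.h>
#include <stdint.h>

/* Assumed layout: a 2-bit status field at bit 8 of the header word. */
#define STATUS_SHIFT 8
#define STATUS_MASK  ((uint64_t)3 << STATUS_SHIFT)
#define NOT_MARKABLE ((uint64_t)3 << STATUS_SHIFT)
#define UNMARKED     ((uint64_t)0 << STATUS_SHIFT)

static int has_status(uint64_t hd, uint64_t s) {
  return (hd & STATUS_MASK) == s;
}

static uint64_t with_status(uint64_t hd, uint64_t s) {
  return (hd & ~STATUS_MASK) | s;
}

int main(void) {
  uint64_t hd = 0xABCD0000u | NOT_MARKABLE;  /* size/tag bits plus status */
  assert(has_status(hd, NOT_MARKABLE));
  hd = with_status(hd, UNMARKED);            /* unpin: collectable again */
  assert(has_status(hd, UNMARKED));
  return 0;
}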