Code example #1
File: shared_heap.c  Project: stedolan/ocaml
static void verify_object(struct heap_verify_state* st, value v) {
  if (!Is_block(v)) return;

  Assert (Hd_val(v));
  if (Tag_val(v) == Infix_tag) {
    /* Infix pointer: step back to the enclosing Closure_tag block */
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  /* Deduplicate: return if this object has already been verified */
  intnat* entry = caml_addrmap_insert_pos(&st->seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  st->objs++;

  // caml_gc_log ("verify_object: v=0x%lx hd=0x%lx tag=%u", v, Hd_val(v), Tag_val(v));
  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.UNMARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    /* Stacks have their own scanner */
    caml_scan_stack(verify_push, st, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(st, f, 0);
    }
  }
}
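
verify_object relies on an explicit work stack rather than recursion: verify_push records children and a driver loop pops them until the reachable heap is exhausted. Those helpers are not part of this excerpt; the sketch below shows plausible shapes, assuming heap_verify_state carries stack, sp, and stack_len fields. The field names, the growth policy, and verify_heap_from are assumptions, not verbatim project code.

/* Sketch only: assumed shapes for the helpers used above. */
static void verify_push(void* st_v, value v, value* ignored) {
  struct heap_verify_state* st = st_v;
  if (!Is_block(v)) return;
  /* Grow the explicit work stack on demand (growth policy assumed) */
  if (st->sp == st->stack_len) {
    st->stack_len = st->stack_len * 2 + 100;
    st->stack = caml_stat_resize(st->stack, sizeof(value) * st->stack_len);
  }
  st->stack[st->sp++] = v;
}

/* Hypothetical driver: verify everything reachable from root */
static void verify_heap_from(struct heap_verify_state* st, value root) {
  verify_push(st, root, 0);
  while (st->sp > 0)
    verify_object(st, st->stack[--st->sp]);
}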
Code example #2
static void verify_object(value v) {
  if (!Is_block(v)) return;

  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&verify_seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  verify_objs++;

  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.MARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(f, 0);
    }
  }
}
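
This variant performs the same traversal as example #1 but keeps its bookkeeping in globals (verify_seen, verify_objs) instead of threading a heap_verify_state through the callbacks. Note the differing invariant: here non-minor objects are asserted MARKED rather than UNMARKED, presumably because this verifier runs at a different point in the major GC cycle.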
Code example #3
File: minor_gc.c  Project: bluddy/ocaml-multicore
static value caml_promote_one(struct promotion_stack* stk, struct domain* domain, value curr)
{
  header_t curr_block_hd;
  int infix_offset = 0;
  if (Is_long(curr) || !Is_minor(curr))
    return curr; /* needs no promotion */

  Assert(caml_owner_of_young_block(curr) == domain);

  curr_block_hd = Hd_val(curr);

  if (Tag_hd(curr_block_hd) == Infix_tag) {
    infix_offset = Infix_offset_val(curr);
    curr -= infix_offset;
    curr_block_hd = Hd_val(curr);
  }

  if (Is_promoted_hd(curr_block_hd)) {
    /* already promoted */
    return caml_addrmap_lookup(&domain->state->remembered_set->promotion, curr) + infix_offset;
  } else if (curr_block_hd == 0) {
    /* promoted by minor GC */
    return Op_val(curr)[0] + infix_offset;
  }

  /* otherwise, must promote */
  void* mem = caml_shared_try_alloc(domain->shared_heap, Wosize_hd(curr_block_hd),
                                    Tag_hd(curr_block_hd), 1);
  if (!mem) caml_fatal_error("allocation failure during promotion");
  value promoted = Val_hp(mem);
  Hd_val(curr) = Promotedhd_hd(curr_block_hd);

  caml_addrmap_insert(&domain->state->remembered_set->promotion, curr, promoted);
  caml_addrmap_insert(&domain->state->remembered_set->promotion_rev, promoted, curr);

  if (Tag_hd(curr_block_hd) >= No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_hd(curr_block_hd); i++)
      Op_val(promoted)[i] = Op_val(curr)[i];
  } else {
    /* push to stack */
    if (stk->sp == stk->stack_len) {
      stk->stack_len = 2 * (stk->stack_len + 10);
      stk->stack = caml_stat_resize(stk->stack,
          sizeof(struct promotion_stack_entry) * stk->stack_len);
    }
    stk->stack[stk->sp].local = curr;
    stk->stack[stk->sp].global = promoted;
    stk->stack[stk->sp].field = 0;
    stk->sp++;
  }
  return promoted + infix_offset;
}
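
The promotion stack that caml_promote_one pushes to is not included in this excerpt. Reconstructed from the fields used above, it plausibly looks like the following; the exact declarations are assumptions.

/* Reconstructed from usage; exact types are assumptions. */
struct promotion_stack_entry {
  value local;   /* original object in the minor heap */
  value global;  /* its copy in the shared heap */
  int field;     /* next field index to process */
};

struct promotion_stack {
  struct promotion_stack_entry* stack;  /* growable array of entries */
  int sp;                               /* number of live entries */
  int stack_len;                        /* allocated capacity */
};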
Code example #4
File: minor_gc.c  Project: bluddy/ocaml-multicore
CAMLexport value caml_promote(struct domain* domain, value root)
{
  struct promotion_stack stk = {0};

  if (Is_long(root))
    /* Integers are already shared */
    return root;

  if (Tag_val(root) == Stack_tag)
    /* Stacks are handled specially */
    return promote_stack(domain, root);

  if (!Is_minor(root))
    /* This value is already shared */
    return root;

  Assert(caml_owner_of_young_block(root) == domain);

  value ret = caml_promote_one(&stk, domain, root);

  while (stk.sp > 0) {
    struct promotion_stack_entry* curr = &stk.stack[stk.sp - 1];
    value local = curr->local;
    value global = curr->global;
    int field = curr->field;
    Assert(field < Wosize_val(local));
    curr->field++;
    if (curr->field == Wosize_val(local))
      stk.sp--;
    value x = Op_val(local)[field];
    if (Is_block(x) && Tag_val(x) == Stack_tag) {
      /* stacks are not promoted unless explicitly requested */
      Ref_table_add(&domain->state->remembered_set->ref, global, field);
    } else {
      x = caml_promote_one(&stk, domain, x);
    }
    Op_val(local)[field] = Op_val(global)[field] = x;
  }
  caml_stat_free(stk.stack);
  return ret;
}
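
Note that the drain loop increments curr->field before descending, so an entry is popped exactly when its last field has been handled; the traversal stays iterative and C stack usage stays bounded. As a usage sketch (assumed, not from the project), a caller that wants to publish a value to other domains could simply wrap caml_promote with the current domain:

/* Hypothetical caller: promote v so it can be shared across domains.
   caml_domain_self() is assumed to return the current domain. */
value share_with_other_domains(value v) {
  return caml_promote(caml_domain_self(), v);
}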
Code example #5
File: minor_gc.c  Project: bluddy/ocaml-multicore
static value promote_stack(struct domain* domain, value stack)
{
  caml_gc_log("Promoting stack");
  Assert(Tag_val(stack) == Stack_tag);
  if (Is_minor(stack)) {
    /* First, promote the actual stack object */
    Assert(caml_owner_of_young_block(stack) == domain);
    /* Stacks are only referenced via fibers, so we don't bother
       using the promotion_table */
    void* new_stack = caml_shared_try_alloc(domain->shared_heap, Wosize_val(stack), Stack_tag, 0);
    if (!new_stack) caml_fatal_error("allocation failure during stack promotion");
    memcpy(Op_hp(new_stack), (void*)stack, Wosize_val(stack) * sizeof(value));
    stack = Val_hp(new_stack);
  }

  /* Promote each object on the stack. */
  promote_domain = domain;
  caml_scan_stack(&promote_stack_elem, stack);
  /* Since we've promoted the objects on the stack, the stack is now clean. */
  caml_clean_stack_domain(stack, domain);
  return stack;
}
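
promote_stack_elem and the promote_domain global are referenced here but not shown. Since caml_scan_stack invokes its callback on each scannable stack slot, a plausible sketch is the following; the real callback may route through the internal promotion stack instead of calling caml_promote directly.

/* Assumed shape of the scan callback: promote each value found on
   the stack and write the promoted pointer back in place. */
static struct domain* promote_domain;

static void promote_stack_elem(value v, value* p) {
  *p = caml_promote(promote_domain, v);
}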
Code example #6
File: minor_gc.c  Project: ocamllabs/ocaml-multicore
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
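
forward_pointer, used throughout the scanning phase, is also outside this excerpt. Its job follows from how oldify marks promoted blocks: the old header is zeroed and field 0 holds the forwarding pointer. A simplified sketch, with assumed details (the real version also handles Infix_tag):

/* Simplified sketch of forward_pointer (assumed, not verbatim):
   if v is a minor-heap block already promoted by oldify, its header
   is 0 and field 0 holds the forwarding pointer; redirect *p. */
static void forward_pointer(void* state, value v, value* p) {
  struct domain* promote_domain = state;
  caml_domain_state* ds = promote_domain->state;
  if (Is_block(v) &&
      is_in_interval(v, ds->young_ptr, ds->young_end) &&
      Hd_val(v) == 0) {
    *p = Op_val(v)[0];
  }
}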