Example #1
static void verify_object(value v) {
  if (!Is_block(v)) return;

  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&verify_seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  verify_objs++;

  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.MARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(f, 0);
    }
  }
}
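verify_push (used above but not shown) is assumed to queue blocks for later inspection by verify_object. A minimal driver sketch under that assumption; the fixed-size stack and the verify_heap_from entry point are hypothetical, and a real runtime would grow its stack dynamically:

static value verify_stack_buf[65536];
static int verify_sp = 0;

/* scanning_action-compatible: the second argument is ignored here */
static void verify_push(value v, value* p) {
  (void)p;
  Assert(verify_sp < 65536);
  verify_stack_buf[verify_sp++] = v;
}

static void verify_heap_from(value root) {
  verify_push(root, 0);
  while (verify_sp > 0)
    verify_object(verify_stack_buf[--verify_sp]);
}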
Example #2
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  uintnat colour;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    sizeclass sz = sizeclass_wsize[whsize];
    Assert(wsize_sizeclass[sz] >= whsize);
    p = pool_allocate(local, sz);
    if (!p) return 0;
    struct heap_stats* s = &local->stats;
    s->pool_live_blocks++;
    s->pool_live_words += whsize;
    s->pool_frag_words += wsize_sizeclass[sz] - whsize;
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
    if (!p) return 0;
  }
  colour = pinned ? NOT_MARKABLE : global.MARKED;
  Hd_hp (p) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
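caml_shared_try_alloc returns a pointer to the block header (or NULL on failure), so a caller converts the result with Val_hp. A hedged caller sketch; the wrapper name and the out-of-memory policy are illustrative, not the runtime's actual allocation path:

static value alloc_shared_or_die(struct caml_heap_state* heap,
                                 mlsize_t wosize, tag_t tag)
{
  value* p = caml_shared_try_alloc(heap, wosize, tag, 0 /* not pinned */);
  if (p == NULL)
    caml_raise_out_of_memory(); /* a real caller might run a GC slice and retry */
  return Val_hp(p);             /* skip the header written above */
}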
Example #3
static int sml_equal_aux(value v1, value v2)
{
  mlsize_t i;
  value * p1, * p2;

 again:
  if (v1 == v2) return 1;
  if (IS_LONG(v1) || IS_LONG(v2)) return 0;
  if (!Is_in_heap(v1) && !Is_young(v1)) return 0;
  if (!Is_in_heap(v2) && !Is_young(v2)) return 0;
  if (Tag_val(v1) != Tag_val(v2)) return 0;
  switch(Tag_val(v1)) {
  case String_tag:
  {
    /* Fast string comparison.  Use memcmp rather than strncmp: ML strings
       may contain embedded NUL bytes, at which strncmp would stop early. */
    size_t len = string_length(v1);
    if (len != string_length(v2)) {
      return 0;
    }
    return (memcmp((char *) String_val(v1),
                   (char *) String_val(v2),
                   len) == 0) ? 1 : 0;
  }
  case Double_tag:
    return (Double_val(v1) == Double_val(v2));
  case Reference_tag:  /* Different reference cells are not equal! */
  case Abstract_tag:
  case Final_tag:
    return 0;
  case Closure_tag:
    invalid_argument("sml_equal: functional value");
  default:
    i = Wosize_val(v1);
    if (i != Wosize_val(v2)) return 0;
    for(p1 = Op_val(v1), p2 = Op_val(v2);
        i > 1;
        i--, p1++, p2++)
      if (!sml_equal_aux(*p1, *p2)) return 0;
    v1 = *p1;
    v2 = *p2;                   /* Tail-call */
    goto again;
  }
}
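The exported primitive presumably wraps the recursive worker and boxes the C result as an ML boolean; a sketch under that assumption:

value sml_equal(value v1, value v2) /* ML type: 'a * 'a -> bool */
{
  return Val_bool(sml_equal_aux(v1, v2));
}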
Example #4
CAMLexport void caml_iterate_named_values(caml_named_action f)
{
  int i;
  for(i = 0; i < Named_value_size; i++){
    struct named_value * nv;
    for (nv = named_value_table[i]; nv != NULL; nv = nv->next) {
      f( Op_val(nv->val), nv->name );
    }
  }
}
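A usage sketch: a caml_named_action receives a pointer to each registered root together with its name, so a diagnostic dump could be written like this (the callback is illustrative):

#include <stdio.h>

static void dump_named(value* val, char* name)
{
  printf("named value: %s (is block: %d)\n", name, Is_block(*val));
}

/* somewhere in runtime diagnostics:
   caml_iterate_named_values(&dump_named); */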
Example #5
CAMLexport value caml_promote(struct domain* domain, value root)
{
  struct promotion_stack stk = {0};

  if (Is_long(root))
    /* Integers are already shared */
    return root;

  if (Tag_val(root) == Stack_tag)
    /* Stacks are handled specially */
    return promote_stack(domain, root);

  if (!Is_minor(root))
    /* This value is already shared */
    return root;

  Assert(caml_owner_of_young_block(root) == domain);

  value ret = caml_promote_one(&stk, domain, root);

  while (stk.sp > 0) {
    struct promotion_stack_entry* curr = &stk.stack[stk.sp - 1];
    value local = curr->local;
    value global = curr->global;
    int field = curr->field;
    Assert(field < Wosize_val(local));
    curr->field++;
    if (curr->field == Wosize_val(local))
      stk.sp--;
    value x = Op_val(local)[field];
    if (Is_block(x) && Tag_val(x) == Stack_tag) {
      /* stacks are not promoted unless explicitly requested */
      Ref_table_add(&domain->state->remembered_set->ref, global, field);
    } else {
      x = caml_promote_one(&stk, domain, x);
    }
    Op_val(local)[field] = Op_val(global)[field] = x;
  }
  caml_stat_free(stk.stack);
  return ret;
}
Example #6
static void read_main_debug_info(struct debug_info *di)
{
  CAMLparam0();
  CAMLlocal3(events, evl, l);
  char_os *exec_name;
  int fd, num_events, orig, i;
  struct channel *chan;
  struct exec_trailer trail;

  CAMLassert(di->already_read == 0);
  di->already_read = 1;

  if (caml_params->cds_file != NULL) {
    exec_name = (char_os*) caml_params->cds_file;
  } else {
    exec_name = (char_os*) caml_params->exe_name;
  }

  fd = caml_attempt_open(&exec_name, &trail, 1);
  if (fd < 0){
    caml_fatal_error ("executable program file not found");
    CAMLreturn0;
  }

  caml_read_section_descriptors(fd, &trail);
  if (caml_seek_optional_section(fd, &trail, "DBUG") != -1) {
    chan = caml_open_descriptor_in(fd);

    num_events = caml_getword(chan);
    events = caml_alloc(num_events, 0);

    for (i = 0; i < num_events; i++) Op_val(events)[i] = Val_unit;

    for (i = 0; i < num_events; i++) {
      orig = caml_getword(chan);
      evl = caml_input_val(chan);
      caml_input_val(chan); /* Skip the list of absolute directory names */
      /* Relocate events in event list */
      for (l = evl; l != Val_int(0); l = Field_imm(l, 1)) {
        value ev = Field_imm(l, 0);
        Store_field (ev, EV_POS, Val_long(Long_val(Field(ev, EV_POS)) + orig));
      }
      /* Record event list */
      Store_field(events, i, evl);
    }

    caml_close_channel(chan);

    di->events = process_debug_events(caml_start_code, events, &di->num_events);
  }

  CAMLreturn0;
}
Example #7
static intnat mark(value initial, intnat budget) {
  value next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    value v = next;
    header_t hd_v;
    found_next = 0;

    Assert(Is_markable(v));
    Assert(v == mark_normalise(v));

    stat_blocks_marked++;
    /* mark the current object */
    hd_v = Hd_val(v);
    // caml_gc_log ("mark: v=0x%lx hd=0x%lx tag=%d sz=%lu",
    //             v, hd_v, Tag_val(v), Wosize_val(v));
    if (Tag_hd (hd_v) == Stack_tag) {
      // caml_gc_log ("mark: stack=%p", (value*)v);
      caml_scan_stack(&caml_darken, v);
    } else if (Tag_hd (hd_v) < No_scan_tag) {
      int i;
      for (i = 0; i < Wosize_hd(hd_v); i++) {
        value child = Op_val(v)[i];
        // caml_gc_log ("mark: v=%p i=%u child=%p",(value*)v,i,(value*)child);
        /* FIXME: this is wrong, as Debug_tag(N) is a valid value.
           However, it's a useful debugging aid for now */
        Assert(!Is_debug_tag(child));
        if (Is_markable(child)) {
          child = mark_normalise(child);
          if (caml_mark_object(child)) {
            if (!found_next) {
              next = child;
              found_next = 1;
            } else {
              mark_stack_push(child);
            }
          }
        }
      }
    }
    budget -= Whsize_hd(hd_v);

    /* if we haven't found any markable children, pop an object to mark */
    if (!found_next) {
      found_next = mark_stack_pop(&next);
    }
  }
  if (found_next) {
    mark_stack_push(next);
  }
  return budget;
}
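Since mark returns its unspent budget, a slice driver can keep popping grey objects until the budget runs out. A sketch; the entry-point name is hypothetical:

static void mark_slice(intnat budget)
{
  value v;
  while (budget > 0 && mark_stack_pop(&v))
    budget = mark(v, budget);
}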
Example #8
value svec_setcptrvalue(value vec)
{
  value res;

  Push_roots(r, 1);
  r[0] = vec;
  res = alloc_string(sizeof(void *));
  /* memcpy replaces the obsolete bcopy; note the swapped argument order */
  memcpy(Op_val(res), String_val(r[0]), sizeof(void *));

  if (jit_ffi_debug)
     fprintf(stderr,"svec_setcptrvalue returning 0x%8.8x [0x%8.8x].\n",
          *(unsigned int *)(String_val(r[0])), *(unsigned int *) res);

  Pop_roots();
  return (value) (*(unsigned int *)res);
}
Example #9
static value next_minor_block(caml_domain_state* domain_state, value curr_hp)
{
  mlsize_t wsz;
  header_t hd;
  value curr_val;
  CAMLassert ((value)domain_state->young_ptr <= curr_hp);
  CAMLassert (curr_hp < (value)domain_state->young_end);
  hd = Hd_hp(curr_hp);
  curr_val = Val_hp(curr_hp);
  if (hd == 0) {
    /* Forwarded object, find the promoted version */
    curr_val = Op_val(curr_val)[0];
  }
  CAMLassert (Is_block(curr_val) && Hd_val(curr_val) != 0 && Tag_val(curr_val) != Infix_tag);
  wsz = Wosize_val(curr_val);
  CAMLassert (wsz <= Max_young_wosize);
  return curr_hp + Bsize_wsize(Whsize_wosize(wsz));
}
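This is what makes a linear scan of the minor heap possible even when it contains forwarded blocks; Example #19 below uses the same pattern in its DEBUG loop. A sketch:

static void walk_minor_heap(caml_domain_state* domain_state)
{
  value hp;
  for (hp = (value)domain_state->young_ptr;
       hp < (value)domain_state->young_end;
       hp = next_minor_block(domain_state, hp)) {
    /* Hd_hp(hp) == 0 means the block at Val_hp(hp) has been forwarded */
  }
}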
Example #10
CAMLexport const value* caml_named_value(char const *name)
{
  struct named_value * nv;
  caml_root ret = NULL;
  caml_plat_lock(&named_value_lock);
  for (nv = named_value_table[hash_value_name(name)];
       nv != NULL;
       nv = nv->next) {
    if (strcmp(name, nv->name) == 0){
      ret = nv->val;
      break;
    }
  }
  caml_plat_unlock(&named_value_lock);
  /* *ret should never be a minor object, since caml_create_root promotes */
  CAMLassert (!(ret && Is_minor(caml_read_root(ret))));
  return Op_val(ret);
}
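Typical C-side usage, as a sketch: the string must match an OCaml-side Callback.register, and "incr_counter" here is hypothetical. Caching the lookup is the usual idiom, since the returned pointer is stable:

static void call_incr(void)
{
  static const value* closure = NULL;
  if (closure == NULL) closure = caml_named_value("incr_counter");
  if (closure != NULL) caml_callback(*closure, Val_unit);
}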
Example #11
/* Test if the ephemeron is alive */
static inline int ephe_check_alive_data (struct caml_ephe_ref_elt *re,
                                         char* young_ptr, char* young_end)
{
  mlsize_t i;
  value child;

  for (i = CAML_EPHE_FIRST_KEY; i < Wosize_val(re->ephe); i++) {
    child = Op_val(re->ephe)[i];
    if (child != caml_ephe_none
        && Is_block (child) && is_in_interval(child, young_ptr, young_end)) {
      resolve_infix_val(&child);
      if (Hd_val(child) != 0) {
        /* value not copied to major heap */
        return 0;
      }
    }
  }
  return 1;
}
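is_in_interval is assumed throughout these examples to be the usual half-open bounds check; a sketch consistent with its call sites (the real definition may differ, e.g. be a macro):

static inline int is_in_interval(value v, char* low_closed, char* high_open)
{
  return (char*)v >= low_closed && (char*)v < high_open;
}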
Example #12
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize, tag_t tag, int pinned) {
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    p = pool_allocate(local, sizeclass_wsize[whsize]);
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
  }
  if (!p) return 0;
  Hd_hp (p) = Make_header(wosize, tag, pinned ? NOT_MARKABLE : global.UNMARKED);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
Example #13
void forward_pointer (void* state, value v, value *p) {
  header_t hd;
  mlsize_t offset;
  value fwd;
  struct domain* promote_domain = state;
  caml_domain_state* domain_state =
    promote_domain ? promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;

  if (Is_block (v) && is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
    hd = Hd_val(v);
    if (hd == 0) {
      *p = Op_val(v)[0];
      CAMLassert (Is_block(*p) && !Is_minor(*p));
    } else if (Tag_hd(hd) == Infix_tag) {
      offset = Infix_offset_hd(hd);
      fwd = 0;
      forward_pointer (state, v - offset, &fwd);
      if (fwd) *p = fwd + offset;
    }
  }
}
Example #14
/* Make sure the minor heap is empty by performing a minor collection if
 * needed. */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes = caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  caml_save_stack_gc();

  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }

    for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }

    caml_oldify_mopup ();

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          vnew = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) { Assert(Tag_val(vnew) == Infix_tag); }
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }

    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion, unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, %u pointers rewritten",
                 (unsigned)stat_live_bytes/1024, (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);

  caml_restore_stack_gc();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
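caml_atomic_cas_field above is assumed to have compare-and-swap semantics, returning 1 on success (its return value is summed into `rewritten`). A sketch of that assumption using C11 atomics; the helper name is hypothetical:

#include <stdatomic.h>

/* Atomically replace Op_val(obj)[field] with newval if it still equals
   oldval; return 1 on success, 0 if another domain got there first. */
static int cas_field_sketch(value obj, int field, value oldval, value newval)
{
  atomic_value* p = &((atomic_value*)Op_val(obj))[field];
  return (int)atomic_compare_exchange_strong(p, &oldval, newval);
}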
Example #15
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      *p = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
    } else if (hd == 0){         /* If already forwarded */
      *p = Op_val(v)[0];  /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;

        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;            /* Set forward flag */
          Op_val(v)[0] = result;     /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;    /* Add this block */
            oldify_todo_list = v;                    /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;            /* Set forward flag */
        Op_val (v)[0] = result;    /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          if (Is_young (f)){
            /* f may itself have been forwarded: use the tag of its copy. */
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            ft = Tag_val (f);
          }
        }
        if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Op_val (v)[0] = result;      /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;                        /* Follow the forwarding */
          goto tail_call;               /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
Example #16
static intnat compare_val(value v1, value v2, int total)
{
  struct compare_item * sp;
  tag_t t1, t2;

  if (!compare_stack) compare_init_stack();

  sp = compare_stack;
  while (1) {
    if (v1 == v2 && total) goto next_item;
    if (Is_long(v1)) {
      if (v1 == v2) goto next_item;
      if (Is_long(v2))
        return Long_val(v1) - Long_val(v2);
      /* Subtraction above cannot overflow and cannot result in UNORDERED */
      switch (Tag_val(v2)) {
      case Forward_tag:
        v2 = Forward_val(v2);
        continue;
      case Custom_tag: {
        int res;
        int (*compare)(value v1, value v2) = Custom_ops_val(v2)->compare_ext;
        if (compare == NULL) break;  /* for backward compatibility */
        caml_compare_unordered = 0;
        res = compare(v1, v2);
        if (caml_compare_unordered && !total) return UNORDERED;
        if (res != 0) return res;
        goto next_item;
      }
      default: /*fallthrough*/;
      }
      
      return LESS;                /* v1 long < v2 block */
    }
    if (Is_long(v2)) {
      switch (Tag_val(v1)) {
      case Forward_tag:
        v1 = Forward_val(v1);
        continue;
      case Custom_tag: {
        int res;
        int (*compare)(value v1, value v2) = Custom_ops_val(v1)->compare_ext;
        if (compare == NULL) break;  /* for backward compatibility */
        caml_compare_unordered = 0;
        res = compare(v1, v2);
        if (caml_compare_unordered && !total) return UNORDERED;
        if (res != 0) return res;
        goto next_item;
      }
      default: /*fallthrough*/;
      }
      return GREATER;            /* v1 block > v2 long */
    }
    t1 = Tag_val(v1);
    t2 = Tag_val(v2);
    if (t1 == Forward_tag) { v1 = Forward_val (v1); continue; }
    if (t2 == Forward_tag) { v2 = Forward_val (v2); continue; }
    if (t1 != t2) return (intnat)t1 - (intnat)t2;
    switch(t1) {
    case String_tag: {
      mlsize_t len1, len2;
      int res;
      if (v1 == v2) break;
      len1 = caml_string_length(v1);
      len2 = caml_string_length(v2);
      res = memcmp(String_val(v1), String_val(v2), len1 <= len2 ? len1 : len2);
      if (res < 0) return LESS;
      if (res > 0) return GREATER;
      if (len1 != len2) return len1 - len2;
      break;
    }
    case Double_tag: {
      double d1 = Double_val(v1);
      double d2 = Double_val(v2);
      if (d1 < d2) return LESS;
      if (d1 > d2) return GREATER;
      if (d1 != d2) {
        if (! total) return UNORDERED;
        /* One or both of d1 and d2 is NaN.  Order according to the
           convention NaN = NaN and NaN < f for all other floats f. */
        if (d1 == d1) return GREATER; /* d1 is not NaN, d2 is NaN */
        if (d2 == d2) return LESS;    /* d2 is not NaN, d1 is NaN */
        /* d1 and d2 are both NaN, thus equal: continue comparison */
      }
      break;
    }
    case Double_array_tag: {
      mlsize_t sz1 = Wosize_val(v1) / Double_wosize;
      mlsize_t sz2 = Wosize_val(v2) / Double_wosize;
      mlsize_t i;
      if (sz1 != sz2) return sz1 - sz2;
      for (i = 0; i < sz1; i++) {
        double d1 = Double_field(v1, i);
        double d2 = Double_field(v2, i);
        if (d1 < d2) return LESS;
        if (d1 > d2) return GREATER;
        if (d1 != d2) {
          if (! total) return UNORDERED;
          /* See comment for Double_tag case */
          if (d1 == d1) return GREATER;
          if (d2 == d2) return LESS;
        }
      }
      break;
    }
    case Abstract_tag:
      compare_free_stack();
      caml_invalid_argument("equal: abstract value");
    case Closure_tag:
    case Infix_tag:
      compare_free_stack();
      caml_invalid_argument("equal: functional value");
    case Object_tag: {
      intnat oid1 = Oid_val(v1);
      intnat oid2 = Oid_val(v2);
      if (oid1 != oid2) return oid1 - oid2;
      break;
    }
    case Custom_tag: {
      int res;
      int (*compare)(value v1, value v2) = Custom_ops_val(v1)->compare;
      /* Hardening against comparisons between different types */
      if (compare != Custom_ops_val(v2)->compare) {
        return strcmp(Custom_ops_val(v1)->identifier,
                      Custom_ops_val(v2)->identifier) < 0
               ? LESS : GREATER;
      }
      if (compare == NULL) {
        compare_free_stack();
        caml_invalid_argument("equal: abstract value");
      }
      caml_compare_unordered = 0;
      res = compare(v1, v2);
      if (caml_compare_unordered && !total) return UNORDERED;
      if (res != 0) return res;
      break;
    }
    default: {
      mlsize_t sz1 = Wosize_val(v1);
      mlsize_t sz2 = Wosize_val(v2);
      /* Compare sizes first for speed */
      if (sz1 != sz2) return sz1 - sz2;
      if (sz1 == 0) break;
      /* Remember that we still have to compare fields 1 ... sz - 1 */
      if (sz1 > 1) {
        sp++;
        if (sp >= compare_stack_limit) sp = compare_resize_stack(sp);
        sp->v1 = Op_val(v1) + 1;
        sp->v2 = Op_val(v2) + 1;
        sp->count = sz1 - 1;
      }
      /* Continue comparison with first field */
      v1 = Field(v1, 0);
      v2 = Field(v2, 0);
      continue;
    }
    }
  next_item:
    /* Pop one more item to compare, if any */
    if (sp == compare_stack) return EQUAL; /* we're done */
    v1 = *((sp->v1)++);
    v2 = *((sp->v2)++);
    if (--(sp->count) == 0) sp--;
  }
}
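The exported comparison primitives wrap compare_val; a sketch of the total-comparison entry point, which clamps the result into {LESS, EQUAL, GREATER} before returning it to OCaml:

CAMLprim value caml_compare(value v1, value v2)
{
  intnat res = compare_val(v1, v2, 1); /* total ordering: never UNORDERED */
  if (res < 0)
    return Val_int(LESS);
  else if (res > 0)
    return Val_int(GREATER);
  else
    return Val_int(EQUAL);
}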
Example #17
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
          domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;    /* Add this block */
      st->todo_list = v;                     /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;             /* Set (GC) forward flag */
      Op_val (v)[0] = result;      /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;                        /* Follow the forwarding */
      goto tail_call;               /*  then oldify. */
    }
  }
}
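The note before this function relies on Infix_tag, Forward_tag and No_scan_tag being consecutive; a sketch of why the dispatch above is exhaustive:

static void check_tag_layout(void)
{
  /* After the Infix_tag loop in oldify_one, tag is either < Infix_tag
     (a scannable block), >= No_scan_tag (raw data), or exactly Forward_tag:
     nothing else fits, because the three tags are consecutive. */
  CAMLassert (Infix_tag + 1 == Forward_tag);
  CAMLassert (Forward_tag + 1 == No_scan_tag);
}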
Example #18
/* Finish the work that was put off by [oldify_one].
   Note that [oldify_one] itself is called by oldify_mopup, so we
   have to be careful to remove the first entry from the list before
   oldifying its fields. */
static void oldify_mopup (struct oldify_state* st)
{
  value v, new_v, f;
  mlsize_t i;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  struct caml_ephe_ref_table ephe_ref_table = domain_state->minor_tables->ephe_ref;
  struct caml_ephe_ref_elt *re;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  int redo = 0;

  while (st->todo_list != 0) {
    v = st->todo_list;                 /* Get the head. */
    CAMLassert (Hd_val (v) == 0);             /* It must be forwarded. */
    new_v = Op_val (v)[0];                /* Follow forward pointer. */
    st->todo_list = Op_val (new_v)[1]; /* Remove from list. */

    f = Op_val (new_v)[0];
    CAMLassert (!Is_debug_tag(f));
    if (Is_block (f) &&
        is_in_interval((value)Hp_val(f), young_ptr, young_end)) {
      oldify_one (st, f, Op_val (new_v));
    }
    for (i = 1; i < Wosize_val (new_v); i++){
      f = Op_val (v)[i];
      CAMLassert (!Is_debug_tag(f));
      if (Is_block (f) &&
          is_in_interval((value)Hp_val(f), young_ptr, young_end)) {
        oldify_one (st, f, Op_val (new_v) + i);
      } else {
        Op_val (new_v)[i] = f;
      }
    }
    CAMLassert (Wosize_val(new_v));
  }

  /* Oldify the data of live ephemerons in the minor heap.  During minor
     collection, keys outside the minor heap are considered alive. */
  for (re = ephe_ref_table.base;
       re < ephe_ref_table.ptr; re++) {
    /* look only at ephemeron with data in the minor heap */
    if (re->offset == CAML_EPHE_DATA_OFFSET) {
      value *data = &Ephe_data(re->ephe);
      if (*data != caml_ephe_none && Is_block(*data) &&
          is_in_interval(*data, young_ptr, young_end)) {
        resolve_infix_val(data);
        if (Hd_val(*data) == 0) { /* Value copied to major heap */
          *data = Op_val(*data)[0];
        } else {
          if (ephe_check_alive_data(re, young_ptr, young_end)) {
            oldify_one(st, *data, data);
            redo = 1; /* st->todo_list can still be 0 */
          }
        }
      }
    }
  }

  if (redo) oldify_mopup (st);
}
Example #19
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all young-young pointers
       from older to younger objects */
    {
    struct addrmap young_young_ptrs = ADDRMAP_INIT;
    mlsize_t i;
    value iter;
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
    }
    for (iter = (value)young_ptr;
         iter < (value)young_end;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      if (hd != 0) {
        value curr = Val_hp(iter);
        tag_t tag = Tag_hd (hd);

        if (tag < No_scan_tag && tag != Cont_tag) {
          // FIXME: should scan Cont_tag
          for (i = 0; i < Wosize_hd(hd); i++) {
            value* f = Op_val(curr) + i;
            if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                *f < curr) {
              CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
            }
          }
        }
      }
    }
    caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
Example #20
CAMLprim value netsys_obj_address(value objv)
{
    return caml_copy_nativeint((intnat) Op_val(objv));
}
Example #21
/* 'a -> t */
CAMLprim value llvm_genericvalue_of_pointer(value V) {
  CAMLparam1(V);
  CAMLreturn(alloc_generic_value(LLVMCreateGenericValueOfPointer(Op_val(V))));
}
Example #22
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
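A hedged usage sketch: promoting a minor-heap value so it can be shared with another domain, using caml_domain_self as already seen in Example #14; the wrapper name is illustrative:

static value share_with_other_domains(value v)
{
  return caml_promote(caml_domain_self(), v);
}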