Example 1
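A heap verifier from the multicore OCaml runtime: verify_object checks one value, normalizes infix pointers, deduplicates visits through an address map, skips NOT_MARKABLE objects, and asserts that every major-heap object it reaches carries the MARKED status before pushing its fields.
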
static void verify_object(value v) {
  if (!Is_block(v)) return;

  /* An infix pointer points into the middle of a closure block;
     normalize it to the enclosing block before checking anything. */
  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  /* The address map doubles as a visited set: bail out if this
     object has already been checked. */
  intnat* entry = caml_addrmap_insert_pos(&verify_seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  /* Objects the GC is told to ignore are not verified further. */
  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  verify_objs++;

  /* Every reachable major-heap object must be MARKED at this point. */
  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.MARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        /* Minor-heap objects may only point at minor-heap objects
           owned by the same domain. */
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(f, 0);
    }
  }
}
Example 2
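A later variant of the same verifier: the visited set and object count move into an explicit heap_verify_state that is threaded through verify_push, the header is asserted non-null, and major-heap objects are expected to be UNMARKED rather than MARKED.
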
static void verify_object(struct heap_verify_state* st, value v) {
  if (!Is_block(v)) return;

  Assert (Hd_val(v)); /* a zero header would mean a free-list slot */
  if (Tag_val(v) == Infix_tag) {
    v -= Infix_offset_val(v);
    Assert(Tag_val(v) == Closure_tag);
  }

  intnat* entry = caml_addrmap_insert_pos(&st->seen, v);
  if (*entry != ADDRMAP_NOT_PRESENT) return;
  *entry = 1;

  if (Has_status_hd(Hd_val(v), NOT_MARKABLE)) return;
  st->objs++;

  // caml_gc_log ("verify_object: v=0x%lx hd=0x%lx tag=%u", v, Hd_val(v), Tag_val(v));
  /* Unlike the variant above, this verifier expects major-heap objects
     to be UNMARKED, presumably because it runs after the mark colours
     have been rotated at the end of a major cycle. */
  if (!Is_minor(v)) {
    Assert(Has_status_hd(Hd_val(v), global.UNMARKED));
  }

  if (Tag_val(v) == Stack_tag) {
    caml_scan_stack(verify_push, st, v);
  } else if (Tag_val(v) < No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_minor(v) && Is_minor(f)) {
        Assert(caml_owner_of_young_block(v) ==
               caml_owner_of_young_block(f));
      }
      if (Is_block(f)) verify_push(st, f, 0);
    }
  }
}
Example 3
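caml_mark_object transitions an object from UNMARKED to MARKED and returns 1 only on that first transition, so a caller knows whether the object's fields still need tracing.
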
int caml_mark_object(value p) {
  Assert (Is_block(p));
  header_t h = Hd_val(p);
  /* An object should have one of these statuses:
       - UNMARKED:     this object has not yet been traced
       - MARKED:       this object has already been traced or is being traced
       - NOT_MARKABLE: this object should be ignored by the GC */
  Assert (h && !Has_status_hd(h, global.GARBAGE));
  if (Has_status_hd(h, global.UNMARKED)) {
    Hd_val(p) = With_status_hd(h, global.MARKED);
    return 1;
  } else {
    return 0;
  }
}
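
A minimal sketch (not from the runtime source) of how a caller might use that return value to trace transitively. It assumes the macros shown in the other examples, ignores Infix_tag and stacks, and uses plain recursion where a real collector would use a mark stack; mark_transitively is a hypothetical helper:

static void mark_transitively(value v) {
  /* caml_mark_object returns 1 only on the first visit, so each
     object's fields are scanned at most once. */
  if (!Is_block(v) || !caml_mark_object(v)) return;
  if (Tag_val(v) < No_scan_tag) {
    mlsize_t i;
    for (i = 0; i < Wosize_val(v); i++) {
      value f = Op_val(v)[i];
      if (Is_block(f)) mark_transitively(f);
    }
  }
}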
Example 4
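verify_pool cross-checks one size-class pool: every free-list entry must have a zero header, and a slot-by-slot walk accumulates live, free, fragmentation, and wastage figures into mem_stats.
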
static void verify_pool(pool* a, sizeclass sz, struct mem_stats* s) {
  value* v;
  /* Every entry on the pool's free list must carry a zero header. */
  for (v = a->next_obj; v; v = (value*)v[1]) {
    Assert(*v == 0);
  }

  value* p = (value*)((char*)a + POOL_HEADER_SZ);
  value* end = (value*)a + POOL_WSIZE;
  mlsize_t wh = wsize_sizeclass[sz];
  s->overhead += Wsize_bsize(POOL_HEADER_SZ);

  /* Walk the pool slot by slot; a zero header means a free slot. */
  while (p + wh <= end) {
    header_t hd = (header_t)*p;
    Assert(hd == 0 || !Has_status_hd(hd, global.GARBAGE));
    if (hd) {
      s->live += Whsize_hd(hd);
      s->overhead += wh - Whsize_hd(hd);
      s->live_blocks++;
    } else {
      s->free += wh;
    }
    p += wh;
  }
  /* Trailing space too small for a whole slot is fixed wastage. */
  Assert(end - p == wastage_sizeclass[sz]);
  s->overhead += end - p;
  s->alloced += POOL_WSIZE;
}
Example 5
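verify_large performs the same accounting for the large-object list, asserting along the way that no block was left in the GARBAGE state.
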
static void verify_large(large_alloc* a, struct mem_stats* s) {
  for (; a; a = a->next) {
    /* The object header sits just past the large-alloc header. */
    header_t hd = *(header_t*)((char*)a + LARGE_ALLOC_HEADER_SZ);
    Assert (!Has_status_hd(hd, global.GARBAGE));
    s->alloced += Wsize_bsize(LARGE_ALLOC_HEADER_SZ) + Whsize_hd(hd);
    s->overhead += Wsize_bsize(LARGE_ALLOC_HEADER_SZ);
  }
}
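
For reference, a plausible shape for mem_stats, inferred purely from the fields the two verifiers above touch (hypothetical; the real definition lives in the runtime sources):

struct mem_stats {
  uintnat alloced;     /* words claimed for pools and large blocks */
  uintnat live;        /* words occupied by live blocks */
  uintnat live_blocks; /* number of live blocks */
  uintnat free;        /* words sitting on free lists */
  uintnat overhead;    /* headers, fragmentation and wastage */
};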
Example 6
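caml_redarken_pool applies a scanning action to every live (MARKED) object in a pool, re-darkening objects whose fields must be scanned again.
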
void caml_redarken_pool(struct pool* r, scanning_action f, void* fdata) {
  mlsize_t wh = wsize_sizeclass[r->sz];
  value* p = (value*)((char*)r + POOL_HEADER_SZ);
  value* end = (value*)((char*)r + Bsize_wsize(POOL_WSIZE));

  /* Rescan every live (MARKED) object in the pool. */
  while (p + wh <= end) {
    header_t hd = (header_t)*p;
    if (hd != 0 && Has_status_hd(hd, global.MARKED)) {
      f(fdata, Val_hp(p), 0);
    }
    p += wh;
  }
}
Example 7
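large_alloc_sweep sweeps one block off the unswept-large list: a GARBAGE block is freed and deducted from the allocation count, a live one moves to the swept list, and the block's size in words is returned either way.
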
static intnat large_alloc_sweep(struct caml_heap_state* local) {
  large_alloc* a = local->unswept_large;
  if (!a) return 0;
  local->unswept_large = a->next;
  /* Read the header before the block can be freed below. */
  header_t hd = *(header_t*)((char*)a + LARGE_ALLOC_HEADER_SZ);
  if (Has_status_hd(hd, global.GARBAGE)) {
    local->large_bytes_allocated -= Bhsize_hd(hd);
    free(a);
  } else {
    /* Still live: keep it on the swept list. */
    a->next = local->swept_large;
    local->swept_large = a;
  }
  }
  return Whsize_hd(hd);
}
Example 8
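pool_sweep does the same for one pool: GARBAGE slots are zeroed and pushed onto the pool's free list with the statistics adjusted, and afterwards the pool is released if it became entirely free, or requeued on the full or available list otherwise.
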
static intnat pool_sweep(struct caml_heap_state* local, pool** plist, sizeclass sz) {
  pool* a = *plist;
  if (!a) return 0;
  *plist = a->next;

  value* p = (value*)((char*)a + POOL_HEADER_SZ);
  value* end = (value*)a + POOL_WSIZE;
  mlsize_t wh = wsize_sizeclass[sz];
  int all_free = 1, all_used = 1;
  struct heap_stats* s = &local->stats;

  while (p + wh <= end) {
    header_t hd = (header_t)*p;
    if (hd == 0) {
      /* already on freelist */
      all_used = 0;
    } else if (Has_status_hd(hd, global.GARBAGE)) {
      Assert(Whsize_hd(hd) <= wh);
      /* add to freelist */
      p[0] = 0;
      p[1] = (value)a->next_obj;
      Assert(Is_block((value)p));
      a->next_obj = p;
      all_used = 0;
      /* update stats */
      s->pool_live_blocks--;
      s->pool_live_words -= Whsize_hd(hd);
      s->pool_frag_words -= (wh - Whsize_hd(hd));
    } else {
      /* still live */
      all_free = 0;
    }
    p += wh;
  }

  if (all_free) {
    pool_release(local, a, sz);
  } else {
    pool** list = all_used ? &local->full_pools[sz] : &local->avail_pools[sz];
    a->next = *list;
    *list = a;
  }

  return POOL_WSIZE;
}
Example 9
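A one-line predicate over the status bits: is_garbage reports whether a value's header carries the GARBAGE status.
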
int is_garbage(value v) {
  return Has_status_hd(Hd_val(v), global.GARBAGE);
}
Example 10
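caml_shared_unpin reverses pinning: a shared-heap block owned by the current domain goes from NOT_MARKABLE back to UNMARKED, so the GC will trace and collect it normally again.
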
void caml_shared_unpin(value v) {
  Assert (Is_block(v) && !Is_minor(v));
  Assert (caml_owner_of_shared_block(v) == caml_domain_self());
  Assert (Has_status_hd(Hd_val(v), NOT_MARKABLE));
  Hd_val(v) = With_status_hd(Hd_val(v), global.UNMARKED);
}