Example #1
static void scan_native_globals(scanning_action f, void* fdata)
{
  int i, j;
  link* dyn_globals; /* snapshot of caml_dyn_globals, taken under the lock */
  value glob;
  link* lnk;

  caml_plat_lock(&roots_mutex);
  dyn_globals = caml_dyn_globals;
  caml_plat_unlock(&roots_mutex);

  /* The global roots */
  for (i = 0; caml_globals[i] != 0; i++) {
    glob = caml_globals[i];
    for (j = 0; j < Wosize_val(glob); j++)
      f (fdata, Op_val(glob)[j], &Op_val(glob)[j]);
  }

  /* Dynamic (natdynlink) global roots */
  iter_list(dyn_globals, lnk) {
    glob = (value) lnk->data;
    for (j = 0; j < Wosize_val(glob); j++){
      f (fdata, Op_val(glob)[j], &Op_val(glob)[j]);
    }
  }
}
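For reference, a minimal sketch of the list type and iteration macro this function assumes, written here from context (the runtime's own headers define the real versions):

typedef struct link {
  void *data;
  struct link *next;
} link;

#define iter_list(list,lnk) \
  for (lnk = list; lnk != NULL; lnk = lnk->next)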
Example #2
void caml_cleanup_deleted_roots()
{
  value r, prev;
  int first = 1;
  caml_plat_lock(&roots_mutex);

  r = roots_all;
  while (Is_block(r)) {
    Assert(!Is_foreign(Op_val(r)[2]));
    value next = Op_val(r)[2];
    if (Int_field(r, 1) == 0) {
      /* root was deleted, remove from list */
      if (first) {
        roots_all = next;
      } else {
        caml_modify_field(prev, 2, next);
      }
    } else {
      /* only advance prev past live roots: if prev moved onto a
         deleted root, a run of adjacent deleted roots would be
         unlinked from each other but not from the live list */
      prev = r;
      first = 0;
    }
    r = next;
  }

  caml_plat_unlock(&roots_mutex);
}
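For context, a hedged sketch of how a root might reach the "deleted" state this sweep collects, assuming the three-field layout used above (field 1 is the liveness flag); the runtime's actual caml_delete_root may differ:

CAMLexport void caml_delete_root(caml_root root)
{
  value v = (value)root;
  Assert(root);
  /* clear the payload and the liveness flag; caml_cleanup_deleted_roots
     unlinks the block from roots_all on its next pass */
  caml_modify_field(v, 0, Val_unit);
  caml_modify_field(v, 1, Val_int(0));
}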
Example #3
static pool* pool_acquire(struct caml_heap_state* local) {
  pool* r;

  if (local->num_free_pools > 0) {
    r = local->free_pools;
    local->free_pools = r->next;
    local->num_free_pools--;
  } else {
    caml_plat_lock(&pool_freelist.lock);
    if (!pool_freelist.free) {
      void* mem = caml_mem_map(Bsize_wsize(POOL_WSIZE) * POOLS_PER_ALLOCATION,
                               Bsize_wsize(POOL_WSIZE), 0 /* allocate */);
      int i;
      if (mem) {
        /* thread every pool in the new mapping onto the (currently
           empty) free list; starting from i = 0 ensures the first
           pool's next pointer is initialized to NULL rather than
           left as garbage */
        for (i = 0; i < POOLS_PER_ALLOCATION; i++) {
          r = (pool*)(((uintnat)mem) + ((uintnat)i) * Bsize_wsize(POOL_WSIZE));
          r->next = pool_freelist.free;
          r->owner = 0;
          pool_freelist.free = r;
        }
      }
    }
    r = pool_freelist.free;
    if (r)
      pool_freelist.free = r->next;
    caml_plat_unlock(&pool_freelist.lock);
    if (r) local->pools_allocated++; /* don't count a failed acquire */
  }
  Assert (!r || r->owner == 0); /* r is NULL if the mapping failed */
  return r;
}
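For reference, a minimal sketch of the global free list that pool_acquire and pool_release assume; the exact definition and the initializer name are assumptions:

static struct {
  caml_plat_mutex lock;   /* guards the shared free list */
  pool* free;             /* singly linked through pool->next */
} pool_freelist = {
  CAML_PLAT_MUTEX_INITIALIZER,  /* assumed static initializer */
  NULL
};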
Example #4
static void scan_global_roots(scanning_action f)
{
  value r, newr;
  caml_plat_lock(&roots_mutex);
  r = roots_all;
  caml_plat_unlock(&roots_mutex);
  
  /* Scanning the list head suffices: each root is an ordinary
     major-heap block whose field 2 links to the next root, so
     marking the head reaches the entire list. */
  Assert(!Is_minor(r));
  newr = r;
  f(newr, &newr);
  Assert(r == newr); /* GC should not move r, it is not young */
}
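Note the call shape f(newr, &newr): this snapshot assumes a two-argument scanning_action, unlike scan_native_globals in Example #1, which threads an extra fdata pointer. A sketch of the signature implied by this call site:

/* inferred from the call above; other snapshots in this collection
   take a leading void* fdata argument */
typedef void (*scanning_action)(value, value*);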
Example #5
static struct pool* find_pool_to_rescan()
{
  struct pool* p;
  caml_plat_lock(&pools_to_rescan_lock);
  if (pools_to_rescan_count > 0) {
    p = pools_to_rescan[--pools_to_rescan_count];
    caml_gc_log("Redarkening pool %p (%d others left)", p, pools_to_rescan_count);
  } else {
    p = 0;
  }
  caml_plat_unlock(&pools_to_rescan_lock);
  return p;
}
Example #6
CAMLexport caml_root caml_create_root(value init) 
{
  CAMLparam1(init);
  value v = caml_alloc_shr(3, 0);
  caml_initialize_field(v, 0, init);
  caml_initialize_field(v, 1, Val_int(1));
  
  caml_plat_lock(&roots_mutex);
  caml_initialize_field(v, 2, roots_all);
  roots_all = v;
  caml_plat_unlock(&roots_mutex);

  CAMLreturnT(caml_root, (caml_root)v);
}
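A matching reader, sketched under the same layout (field 0 holds the registered value); this is hypothetical and ignores any read barrier the real runtime may impose:

CAMLexport value caml_read_root(caml_root root)
{
  value v = (value)root;
  Assert(root);
  Assert(Is_block(v) && Wosize_val(v) == 3);
  return Op_val(v)[0]; /* field 0: the registered value */
}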
Example #7
static void pool_release(struct caml_heap_state* local, pool* pool) {
  pool->owner = 0;
  if (local->num_free_pools < MAX_LOCAL_FREE_POOLS) {
    local->num_free_pools++;
    pool->next = local->free_pools;
    local->free_pools = pool;
  } else {
    caml_plat_lock(&pool_freelist.lock);
    pool->next = pool_freelist.free;
    pool_freelist.free = pool;
    caml_plat_unlock(&pool_freelist.lock);
    local->pools_allocated--;
  }
}
Example #8
static void pool_release(struct caml_heap_state* local, pool* pool, sizeclass sz) {
  pool->owner = 0;
  Assert(pool->sz == sz);
  local->stats.pool_words -= POOL_WSIZE;
  local->stats.pool_frag_words -= POOL_HEADER_WSIZE + wastage_sizeclass[sz];
  if (local->num_free_pools < MAX_LOCAL_FREE_POOLS) {
    /* FIXME: in the local cache, other domains can't get it */
    local->num_free_pools++;
    pool->next = local->free_pools;
    local->free_pools = pool;
  } else {
    /* FIXME: give free pools back to the OS */
    caml_plat_lock(&pool_freelist.lock);
    pool->next = pool_freelist.free;
    pool_freelist.free = pool;
    caml_plat_unlock(&pool_freelist.lock);
  }
}
Example #9
CAMLexport const value* caml_named_value(char const *name)
{
  struct named_value * nv;
  caml_root ret = NULL;
  caml_plat_lock(&named_value_lock);
  for (nv = named_value_table[hash_value_name(name)];
       nv != NULL;
       nv = nv->next) {
    if (strcmp(name, nv->name) == 0){
      ret = nv->val;
      break;
    }
  }
  caml_plat_unlock(&named_value_lock);
  /* The root's value should never be a minor object, since
     caml_create_root promotes its argument to the major heap */
  CAMLassert (!(ret && Is_minor(caml_read_root(ret))));
  return Op_val(ret); /* NULL if not found, else &field 0 of the root */
}
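A sketch of the table entries this lookup walks, inferred from the field accesses above; the bucket count and exact layout are assumptions, and hash_value_name is assumed to map a name into [0, Named_value_size):

struct named_value {
  caml_root val;              /* root holding the registered value */
  struct named_value *next;   /* hash-bucket chain */
  char name[1];               /* inline, NUL-terminated name */
};

#define Named_value_size 13   /* assumed bucket count */
static struct named_value *named_value_table[Named_value_size] = { NULL };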
Example #10
static void mark_stack_prune ()
{
  struct addrmap t = ADDRMAP_INIT;
  int count = 0, entry;
  addrmap_iterator i;
  uintnat mark_stack_count = caml_domain_state->mark_stack_count;
  value* mark_stack = caml_domain_state->mark_stack;

  /* space used by the computations below */
  uintnat table_max = mark_stack_count / 100;
  if (table_max < 1000) table_max = 1000;

  /* amount of space we want to free up */
  uintnat entries_to_free = (uintnat)(mark_stack_count * 0.20);

  /* We compress the mark stack by removing all of the objects from a
     subset of pools, which are rescanned later. For efficiency, we
     want to select those pools which occur most frequently, so that
     we need to rescan as few pools as possible. However, we do not
     have space to build a complete histogram.

     Using ~1% of the mark stack's space, we can find all of the
     elements that occur at least 100 times using the Misra-Gries
     heavy hitter algorithm (see J. Misra and D. Gries, "Finding
     repeated elements", 1982); a standalone sketch of the counting
     scheme follows this function. */

  for (entry = 0; entry < mark_stack_count; entry++) {
    struct pool* pool = caml_pool_of_shared_block(mark_stack[entry]);
    if (!pool) continue;
    value p = (value)pool;
    if (caml_addrmap_contains(&t, p)) {
      /* if it's already present, increase the count */
      (*caml_addrmap_insert_pos(&t, p)) ++;
    } else if (count < table_max) {
      /* if there's space, insert it with count 1 */
      *caml_addrmap_insert_pos(&t, p) = 1;
      count++;
    } else {
      /* otherwise, decrease all entries by 1 */
      struct addrmap s = ADDRMAP_INIT;
      int scount = 0;
      for (i = caml_addrmap_iterator(&t);
           caml_addrmap_iter_ok(&t, i);
           i = caml_addrmap_next(&t, i)) {
        value k = caml_addrmap_iter_key(&t, i);
        value v = caml_addrmap_iter_value(&t, i);
        if (v > 1) {
          *caml_addrmap_insert_pos(&s, k) = v - 1;
          scount++;
        }
      }
      caml_addrmap_clear(&t);
      t = s;
      count = scount;
    }
  }

  /* t now contains all pools that occur at least 100 times.
     If no pools occur at least 100 times, t is some arbitrary subset of pools.
     Next, we get an accurate count of the occurrences of the pools in t */

  for (i = caml_addrmap_iterator(&t);
       caml_addrmap_iter_ok(&t, i);
       i = caml_addrmap_next(&t, i)) {
    *caml_addrmap_iter_val_pos(&t, i) = 0;
  }
  for (entry = 0; entry < mark_stack_count; entry++) {
    value p = (value)caml_pool_of_shared_block(mark_stack[entry]);
    if (p && caml_addrmap_contains(&t, p))
      (*caml_addrmap_insert_pos(&t, p))++;
  }

  /* Next, find a subset of those pools that covers enough entries */

  struct pool_count* pools = caml_stat_alloc(count * sizeof(struct pool_count));
  int pos = 0;
  for (i = caml_addrmap_iterator(&t);
       caml_addrmap_iter_ok(&t, i);
       i = caml_addrmap_next(&t, i)) {
    struct pool_count* p = &pools[pos++];
    p->pool = (struct pool*)caml_addrmap_iter_key(&t, i);
    p->occurs = (int)caml_addrmap_iter_value(&t, i);
  }
  Assert(pos == count);
  caml_addrmap_clear(&t);

  qsort(pools, count, sizeof(struct pool_count), &pool_count_cmp);

  int start = count, total = 0;
  while (start > 0 && total < entries_to_free) {
    start--;
    total += pools[start].occurs;
  }

  /* pools[start..count) now covers roughly entries_to_free mark
     stack entries; remove their blocks from the stack and queue
     those pools for rescanning */
  for (i = start; i < count; i++) {
    *caml_addrmap_insert_pos(&t, (value)pools[i].pool) = 1;
  }
  int out = 0;
  for (entry = 0; entry < mark_stack_count; entry++) {
    value v = mark_stack[entry];
    value p = (value)caml_pool_of_shared_block(v);
    if (!(p && caml_addrmap_contains(&t, p))) {
      mark_stack[out++] = v;
    }
  }
  caml_domain_state->mark_stack_count = out;

  caml_gc_log("Mark stack overflow. Postponing %d pools (%.1f%%, leaving %d).",
              count-start, 100. * (double)total / (double)mark_stack_count,
              (int)caml_domain_state->mark_stack_count);


  /* Add the pools to rescan to the global list.

     This must be done after the mark stack is filtered, since other
     threads race to remove pools from the global list. As soon as
     pools_to_rescan_lock is released, we cannot rely on pools being
     in the global list. */

  caml_plat_lock(&pools_to_rescan_lock);
  for (i = start; i < count; i++) {
    if (pools_to_rescan_count == pools_to_rescan_len) {
      pools_to_rescan_len = pools_to_rescan_len * 2 + 128;
      pools_to_rescan =
        caml_stat_resize(pools_to_rescan, pools_to_rescan_len * sizeof(struct pool*));
    }
    pools_to_rescan[pools_to_rescan_count++] = pools[i].pool;
  }
  caml_plat_unlock(&pools_to_rescan_lock);
}
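As promised above, a standalone, hypothetical sketch of the Misra-Gries counting scheme on plain ints (demo code, not runtime code): with K counter slots, any value occurring more than n/(K+1) times in the input is guaranteed to end with a positive count. As in mark_stack_prune, a second exact counting pass over the data then separates true heavy hitters from the arbitrary residue.

#include <stdio.h>

#define K 4 /* number of counter slots (plays the role of table_max) */

typedef struct { int key; int count; } slot;

static void misra_gries(const int *xs, int n, slot t[K])
{
  int i, j;
  for (j = 0; j < K; j++) t[j].count = 0;
  for (i = 0; i < n; i++) {
    /* already tracked: bump its counter */
    for (j = 0; j < K; j++)
      if (t[j].count > 0 && t[j].key == xs[i]) { t[j].count++; break; }
    if (j < K) continue;
    /* free slot: start tracking xs[i] with count 1 */
    for (j = 0; j < K; j++)
      if (t[j].count == 0) { t[j].key = xs[i]; t[j].count = 1; break; }
    if (j < K) continue;
    /* table full: decrement every counter, charging K+1 distinct values */
    for (j = 0; j < K; j++)
      t[j].count--;
  }
}

int main(void)
{
  int xs[] = {1,1,1,1,2,3,1,2,2,2,2,4,5,2,1};
  slot t[K];
  int i;
  misra_gries(xs, (int)(sizeof xs / sizeof *xs), t);
  for (i = 0; i < K; i++)
    if (t[i].count > 0)
      printf("candidate %d (residual count %d)\n", t[i].key, t[i].count);
  return 0;
}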
Example #11
void caml_register_dyn_global(void *v) {
  caml_plat_lock(&roots_mutex);
  caml_dyn_globals = cons((void*) v, caml_dyn_globals);
  caml_plat_unlock(&roots_mutex);
}
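caml_register_dyn_global relies on a small cons helper over the same link type sketched after Example #1; a plausible definition, assuming the runtime's caml_stat_alloc for out-of-heap allocation:

static link *cons(void *data, link *tl)
{
  link *lnk = caml_stat_alloc(sizeof(link));
  lnk->data = data;
  lnk->next = tl;
  return lnk;
}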