Пример #1
0
/* Promote one value [curr] from the minor heap into the shared (major) heap
   of [domain].  Returns the value's new major-heap address.
   - Immediates and values already outside the minor heap are returned as-is.
   - Infix pointers (into the middle of a closure block) are rewound to the
     enclosing block; the same offset is re-added to the returned value.
   - Non-scannable blocks (tag >= No_scan_tag) have their fields copied here;
     scannable blocks are instead pushed onto [stk] with field index 0 —
     NOTE(review): their fields are presumably copied/promoted by a
     stack-draining loop in the caller; confirm against call sites. */
static value caml_promote_one(struct promotion_stack* stk, struct domain* domain, value curr)
{
  header_t curr_block_hd;
  int infix_offset = 0;
  if (Is_long(curr) || !Is_minor(curr))
    return curr; /* needs no promotion */

  /* Only the owning domain may promote its own young blocks. */
  Assert(caml_owner_of_young_block(curr) == domain);

  curr_block_hd = Hd_val(curr);

  if (Tag_hd(curr_block_hd) == Infix_tag) {
    /* Pointer into the middle of a block: promote the enclosing block
       and remember the offset so the caller gets an equivalent pointer. */
    infix_offset = Infix_offset_val(curr);
    curr -= infix_offset;
    curr_block_hd = Hd_val(curr);
  }

  if (Is_promoted_hd(curr_block_hd)) {
    /* Already promoted by this promotion pass: the new address was
       recorded in the remembered set's promotion map (see insert below). */
    return caml_addrmap_lookup(&domain->state->remembered_set->promotion, curr) + infix_offset;
  } else if (curr_block_hd == 0) {
    /* promoted by minor GC: a zero header marks a forwarded block and
       field 0 holds the forwarding pointer. */
    return Op_val(curr)[0] + infix_offset;
  }

  /* otherwise, must promote */
  void* mem = caml_shared_try_alloc(domain->shared_heap, Wosize_hd(curr_block_hd),
                                           Tag_hd(curr_block_hd), 1);
  if (!mem) caml_fatal_error("allocation failure during promotion");
  value promoted = Val_hp(mem);
  /* Mark the old copy as promoted so a second visit takes the
     Is_promoted_hd branch above instead of promoting again. */
  Hd_val(curr) = Promotedhd_hd(curr_block_hd);

  /* Record the old->new mapping (and its inverse) in the remembered set. */
  caml_addrmap_insert(&domain->state->remembered_set->promotion, curr, promoted);
  caml_addrmap_insert(&domain->state->remembered_set->promotion_rev, promoted, curr);

  if (Tag_hd(curr_block_hd) >= No_scan_tag) {
    /* No pointers inside: a raw word-by-word copy completes the promotion. */
    int i;
    for (i = 0; i < Wosize_hd(curr_block_hd); i++)
      Op_val(promoted)[i] = Op_val(curr)[i];
  } else {
    /* push to stack: fields may themselves need promotion, so defer them.
       Grow the stack geometrically (with a +10 floor for the empty case). */
    if (stk->sp == stk->stack_len) {
      stk->stack_len = 2 * (stk->stack_len + 10);
      stk->stack = caml_stat_resize(stk->stack,
          sizeof(struct promotion_stack_entry) * stk->stack_len);
    }
    stk->stack[stk->sp].local = curr;
    stk->stack[stk->sp].global = promoted;
    stk->stack[stk->sp].field = 0;
    stk->sp++;
  }
  return promoted + infix_offset;
}
Пример #2
0
/* Allocate and initialize the shared-heap state for the calling domain.
   The main domain additionally initializes the global pool free-list lock.
   Returns a freshly allocated caml_heap_state owned by the caller's domain. */
struct caml_heap_state* caml_init_shared_heap() {
  /* The pool free-list mutex is process-global: only the main domain
     initializes it. */
  if (caml_domain_self()->is_main) {
    caml_plat_mutex_init(&pool_freelist.lock);
  }

  /* Invariant check: promoting a zero header must yield NOT_MARKABLE. */
  Assert(NOT_MARKABLE == Promotedhd_hd(0));

  struct caml_heap_state* heap = caml_stat_alloc(sizeof(struct caml_heap_state));

  heap->free_pools = 0;
  heap->num_free_pools = 0;

  /* Start every size class with empty pool lists. */
  int sz;
  for (sz = 0; sz < NUM_SIZECLASSES; sz++) {
    heap->avail_pools[sz] = 0;
    heap->full_pools[sz] = 0;
    heap->unswept_avail_pools[sz] = 0;
    heap->unswept_full_pools[sz] = 0;
  }

  heap->next_to_sweep = 0;
  heap->swept_large = 0;
  heap->unswept_large = 0;
  heap->owner = caml_domain_self();
  heap->pools_allocated = 0;
  heap->large_bytes_allocated = 0;
  return heap;
}