Example #1
CAMLprim value caml_gc_counters(value v)
{
  CAMLparam0 ();   /* v is ignored */
  CAMLlocal1 (res);

  /* get a copy of these before allocating anything... */
#ifdef _KERNEL
  uintnat minwords = caml_stat_minor_words
                    + Wsize_bsize (caml_young_end - caml_young_ptr);
  uintnat prowords = caml_stat_promoted_words;
  uintnat majwords = caml_stat_major_words + caml_allocated_words;
#else
  double minwords = caml_stat_minor_words
                    + (double) Wsize_bsize (caml_young_end - caml_young_ptr);
  double prowords = caml_stat_promoted_words;
  double majwords = caml_stat_major_words + (double) caml_allocated_words;
#endif

  res = caml_alloc_tuple (3);
#ifdef _KERNEL
  Store_field (res, 0, Val_long (minwords));
  Store_field (res, 1, Val_long (prowords));
  Store_field (res, 2, Val_long (majwords));
#else
  Store_field (res, 0, caml_copy_double (minwords));
  Store_field (res, 1, caml_copy_double (prowords));
  Store_field (res, 2, caml_copy_double (majwords));
#endif
  CAMLreturn (res);
}
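All of these snippets rely on the byte/word conversion macros from the runtime's mlvalues.h. For reference, they are essentially the two one-liners below: Wsize_bsize turns a byte count into a count of machine words (sizeof (value) is 8 on 64-bit platforms, 4 on 32-bit), and Bsize_wsize is its inverse. In the example above, caml_young_end - caml_young_ptr is the number of bytes already allocated in the minor heap (allocation moves young_ptr downwards from young_end), so Wsize_bsize converts that byte count to words before it is added to the word counters.

#define Bsize_wsize(sz) ((sz) * sizeof (value))   /* words -> bytes */
#define Wsize_bsize(sz) ((sz) / sizeof (value))   /* bytes -> words */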
Example #2
static void verify_large(large_alloc* a, struct mem_stats* s) {
  for (; a; a = a->next) {
    header_t hd = *(header_t*)((char*)a + LARGE_ALLOC_HEADER_SZ);
    Assert (!Has_status_hd(hd, global.GARBAGE));
    s->alloced += Wsize_bsize(LARGE_ALLOC_HEADER_SZ) + Whsize_hd(hd);
    s->overhead += Wsize_bsize(LARGE_ALLOC_HEADER_SZ);
  }
}
Example #3
value gc_get (value v) /* ML */
{
  value res;

  Assert (v == Atom (0));
  /* Order of elements changed for Moscow ML */
  res = alloc (4, 0);
  Field (res, 1) = Wsize_bsize (Val_long (minor_heap_size));
  Field (res, 0) = Wsize_bsize (Val_long (major_heap_increment));
  Field (res, 2) = Val_long (percent_free);
  Field (res, 3) = Val_bool (verb_gc);
  return res;
}
Example #4
CAMLprim value caml_gc_quick_stat(value v)
{
  CAMLparam0 ();
  CAMLlocal1 (res);

  /* get a copy of these before allocating anything... */
#ifdef _KERNEL
  uintnat minwords = caml_stat_minor_words
                    + Wsize_bsize (caml_young_end - caml_young_ptr);
  uintnat prowords = caml_stat_promoted_words;
  uintnat majwords = caml_stat_major_words + caml_allocated_words;
#else
  double minwords = caml_stat_minor_words
                    + (double) Wsize_bsize (caml_young_end - caml_young_ptr);
  double prowords = caml_stat_promoted_words;
  double majwords = caml_stat_major_words + (double) caml_allocated_words;
#endif
  intnat mincoll = caml_stat_minor_collections;
  intnat majcoll = caml_stat_major_collections;
  intnat heap_words = caml_stat_heap_size / sizeof (value);
  intnat top_heap_words = caml_stat_top_heap_size / sizeof (value);
  intnat cpct = caml_stat_compactions;
  intnat heap_chunks = caml_stat_heap_chunks;

  res = caml_alloc_tuple (16);
#ifdef _KERNEL
  Store_field (res, 0, Val_long (minwords));
  Store_field (res, 1, Val_long (prowords));
  Store_field (res, 2, Val_long (majwords));
#else
  Store_field (res, 0, caml_copy_double (minwords));
  Store_field (res, 1, caml_copy_double (prowords));
  Store_field (res, 2, caml_copy_double (majwords));
#endif
  Store_field (res, 3, Val_long (mincoll));
  Store_field (res, 4, Val_long (majcoll));
  Store_field (res, 5, Val_long (heap_words));
  Store_field (res, 6, Val_long (heap_chunks));
  Store_field (res, 7, Val_long (0));
  Store_field (res, 8, Val_long (0));
  Store_field (res, 9, Val_long (0));
  Store_field (res, 10, Val_long (0));
  Store_field (res, 11, Val_long (0));
  Store_field (res, 12, Val_long (0));
  Store_field (res, 13, Val_long (cpct));
  Store_field (res, 14, Val_long (top_heap_words));
  Store_field (res, 15, Val_long (caml_stack_usage()));
  CAMLreturn (res);
}
Example #5
/* Use this function to tell the major GC to speed up when you use
   finalized blocks to automatically deallocate resources (other
   than memory). The GC will do at least one cycle every [max]
   allocated resources; [res] is the number of resources allocated
   this time.
   Note that only [res/max] is relevant.  The units (and kind of
   resource) can change between calls to [caml_adjust_gc_speed].
*/
CAMLexport void caml_adjust_gc_speed (mlsize_t res, mlsize_t max)
{
  if (max == 0) max = 1;
  if (res > max) res = max;
  caml_extra_heap_resources += (double) res / (double) max;
  if (caml_extra_heap_resources > 1.0){
    caml_extra_heap_resources = 1.0;
    caml_urge_major_slice ();
  }
  if (caml_extra_heap_resources
           > (double) Wsize_bsize (caml_minor_heap_size) / 2.0
             / (double) Wsize_bsize (caml_stat_heap_size)) {
    caml_urge_major_slice ();
  }
}
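A hedged usage sketch: a binding that hands out an off-heap resource can report it to the GC right after allocating the wrapper block, so that unreachable wrappers (and their finalizers) are collected promptly. Here buffer_ops is an assumed custom_operations value whose finalizer frees the buffer, and alloc_external_buffer is a hypothetical helper; only caml_alloc_custom, Data_custom_val and caml_adjust_gc_speed are runtime API (error handling omitted).

value alloc_external_buffer (size_t n)
{
  value v = caml_alloc_custom (&buffer_ops, sizeof (char *), 0, 1);
  *((char **) Data_custom_val (v)) = malloc (n);
  /* Ask for at least one major cycle per megabyte of external memory. */
  caml_adjust_gc_speed (n, 1024 * 1024);
  return v;
}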
Example #6
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(m)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.

   See also: caml_compact_heap, which duplicates most of this function.
*/
int caml_add_to_heap (char *m)
{
                                     Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (Bsize_wsize (caml_stat_heap_wsz) + Chunk_size (m)) / 1024);

  /* Register block in page table */
  if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
    return -1;

  /* Chain this heap chunk. */
  {
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  caml_stat_heap_wsz += Wsize_bsize (Chunk_size (m));
  if (caml_stat_heap_wsz > caml_stat_top_heap_wsz){
    caml_stat_top_heap_wsz = caml_stat_heap_wsz;
  }
  return 0;
}
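The comment above describes a contract; a minimal sketch of a caller that honours it is shown below (an illustration in the style of the runtime's expand_heap, not the actual function). The chunk is allocated with caml_alloc_for_heap, turned into one big blue block, registered with caml_add_to_heap, and returned so that the caller can put it on the free list, as caml_alloc_shr does in example #20 via caml_fl_add_blocks.

static char *grow_heap_sketch (mlsize_t request_wosize)
{
  asize_t bytes = caml_round_heap_chunk_size (Bhsize_wosize (request_wosize));
  char *mem = caml_alloc_for_heap (bytes);
  if (mem == NULL) return NULL;
  /* One blue (free) block covering the whole chunk; per the comment above,
     blue blocks must be put on the free list by the caller. */
  Hd_hp (mem) = Make_header (Wosize_bhsize (bytes), 0, Caml_blue);
  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);   /* pointer to the block body, ready for the free list */
}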
Example #7
void minor_collection(void)
{
  value **r;
  struct longjmp_buffer raise_buf;
  struct longjmp_buffer *old_external_raise;
  long prev_alloc_words = allocated_words;

  if (setjmp(raise_buf.buf)) {
    fatal_error ("Fatal error: out of memory.\n");
  }
  old_external_raise = external_raise;
  external_raise = &raise_buf;

  beg_gc_time();

  gc_message ("<", 0);
  local_roots (oldify);
  for (r = ref_table; r < ref_table_ptr; r++) oldify (*r, **r);
  stat_minor_words += Wsize_bsize (young_ptr - young_start);
  young_ptr = young_start;
  ref_table_ptr = ref_table;
  ref_table_limit = ref_table_threshold;
  gc_message (">", 0);

  external_raise = old_external_raise;

  stat_promoted_words += allocated_words - prev_alloc_words;
  ++ stat_minor_collections;
  major_collection_slice ();

  end_gc_time();

}
Example #8
CAMLprim value caml_gc_get(value v)
{
  CAMLparam0 ();   /* v is ignored */
  CAMLlocal1 (res);

  res = caml_alloc_tuple (7);
#ifndef NATIVE_CODE
  Store_field (res, 5, Val_long (caml_max_stack_size));                 /* l */
#else
  Store_field (res, 5, Val_long (0));
#endif

  CAMLreturn (res);

#if 0
  CAMLparam0 ();   /* v is ignored */
  CAMLlocal1 (res);

  res = caml_alloc_tuple (7);
  Store_field (res, 0, Val_long (Wsize_bsize (Caml_state->minor_heap_size)));  /* s */
  Store_field (res, 1, Val_long (caml_major_heap_increment));           /* i */
  Store_field (res, 2, Val_long (caml_percent_free));                   /* o */
  Store_field (res, 3, Val_long (caml_params->verb_gc));         /* v */
  Store_field (res, 4, Val_long (caml_percent_max));                    /* O */
#ifndef NATIVE_CODE
  Store_field (res, 5, Val_long (caml_max_stack_size));                 /* l */
#else
  Store_field (res, 5, Val_long (0));
#endif
  Store_field (res, 6, Val_long (caml_allocation_policy));              /* a */
  Store_field (res, 7, Val_long (caml_major_window));                   /* w */
  CAMLreturn (res);
#endif
}
Example #9
static void verify_pool(pool* a, sizeclass sz, struct mem_stats* s) {
  value* v;
  for (v = a->next_obj; v; v = (value*)v[1]) {
    Assert(*v == 0);
  }

  value* p = (value*)((char*)a + POOL_HEADER_SZ);
  value* end = (value*)a + POOL_WSIZE;
  mlsize_t wh = wsize_sizeclass[sz];
  s->overhead += Wsize_bsize(POOL_HEADER_SZ);

  while (p + wh <= end) {
    header_t hd = (header_t)*p;
    Assert(hd == 0 || !Has_status_hd(hd, global.GARBAGE));
    if (hd) {
      s->live += Whsize_hd(hd);
      s->overhead += wh - Whsize_hd(hd);
      s->live_blocks++;
    } else {
      s->free += wh;
    }
    p += wh;
  }
  Assert(end - p == wastage_sizeclass[sz]);
  s->overhead += end - p;
  s->alloced += POOL_WSIZE;
}
Example #10
void caml_compact_heap_maybe (void)
{
  /* Estimated free words in the heap:
         FW = fl_size_at_change + 3 * (caml_fl_cur_size
                                       - caml_fl_size_at_phase_change)
         FW = 3 * caml_fl_cur_size - 2 * caml_fl_size_at_phase_change
     Estimated live words:      LW = caml_stat_heap_size - FW
     Estimated free percentage: FP = 100 * FW / LW
     We compact the heap if FP > caml_percent_max
  */
  uintnat fw, fp;

  Assert (caml_gc_phase == Phase_idle);
  if (caml_percent_max >= 1000000) return;
  if (caml_stat_major_collections < 3 || caml_stat_heap_chunks < 3) return;

  fw = 3 * caml_fl_cur_size - 2 * caml_fl_size_at_phase_change;
  if (fw < 0) fw = caml_fl_cur_size;

  if (fw >= Wsize_bsize (caml_stat_heap_size)){
    fp = 1000000;
  }else{
    fp = 100 * fw / (Wsize_bsize (caml_stat_heap_size) - fw);
    if (fp > 1000000) fp = 1000000;
  }
  caml_gc_message (0x200, "FL size at phase change = %"
                          ARCH_INTNAT_PRINTF_FORMAT "u\n",
                   caml_fl_size_at_phase_change);
  caml_gc_message (0x200, "Estimated overhead = %"
                          ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                   fp);
  if (fp >= caml_percent_max){
    caml_gc_message (0x200, "Automatic compaction triggered.\n", 0);
    caml_finish_major_cycle ();

    /* We just did a complete GC, so we can measure the overhead exactly. */
    fw = caml_fl_cur_size;
    fp = 100 * fw / (Wsize_bsize (caml_stat_heap_size) - fw);
    caml_gc_message (0x200, "Measured overhead: %"
                            ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                     fp);

    caml_compact_heap ();
  }
}
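To make the estimate above concrete, here is a purely illustrative computation (the numbers are invented). Suppose caml_fl_cur_size = 300000 words, caml_fl_size_at_phase_change = 100000 words, and the heap holds 2000000 words:

  FW = 3 * 300000 - 2 * 100000 = 700000 words
  FP = 100 * 700000 / (2000000 - 700000) ≈ 53%

With the stock max_overhead of 500 (caml_percent_max), 53% does not trigger a compaction; a program that sets Gc.max_overhead to 50 would compact here.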
Example #11
CAMLprim value caml_gc_get(value v)
{
    CAMLparam0 ();   /* v is ignored */
    CAMLlocal1 (res);

    res = caml_alloc_tuple (6);
    Store_field (res, 0, Val_long (Wsize_bsize (caml_minor_heap_size)));  /* s */
    Store_field (res, 1,Val_long(Wsize_bsize(caml_major_heap_increment)));/* i */
    Store_field (res, 2, Val_long (caml_percent_free));                   /* o */
    Store_field (res, 3, Val_long (caml_verb_gc));                        /* v */
    Store_field (res, 4, Val_long (caml_percent_max));                    /* O */
#ifndef NATIVE_CODE
    Store_field (res, 5, Val_long (caml_max_stack_size));                 /* l */
#else
    Store_field (res, 5, Val_long (0));
#endif
    CAMLreturn (res);
}
Example #12
/* Use this function to tell the major GC to speed up when you use
   finalized objects to automatically deallocate extra-heap objects.
   The GC will do at least one cycle every [max] allocated words;
   [mem] is the number of words allocated this time.
   Note that only [mem/max] is relevant.  You can use numbers of bytes
   (or kilobytes, ...) instead of words.  You can change units between
   calls to [adjust_collector_speed].
*/
void adjust_gc_speed (mlsize_t mem, mlsize_t max)
{
  if (max == 0) max = 1;
  if (mem > max) mem = max;
  extra_heap_memory += ((float) mem / max) * stat_heap_size;
  if (extra_heap_memory > stat_heap_size){
    extra_heap_memory = stat_heap_size;
  }
  if (extra_heap_memory > Wsize_bsize (minor_heap_size) / 2) force_minor_gc ();
}
Example #13
void caml_compact_heap (void)
{
    uintnat target_size, live;

    do_compaction ();
    /* Compaction may fail to shrink the heap to a reasonable size
       because it deals in complete chunks: if a very large chunk
       is at the beginning of the heap, everything gets moved to
       it and it is not freed.

       In that case, we allocate a new chunk of the desired heap
       size, chain it at the beginning of the heap (thus pretending
       its address is smaller), and launch a second compaction.
       This will move all data to this new chunk and free the
       very large chunk.

       See PR#5389
    */
    /* We compute:
       freewords = caml_fl_cur_size          (exact)
       heapsize = caml_heap_size             (exact)
       live = heap_size - freewords
       target_size = live * (1 + caml_percent_free / 100)
                   = live / 100 * (100 + caml_percent_free)
       We add 1 to live/100 to make sure it isn't 0.

       We recompact if target_size < heap_size / 2
    */
    live = caml_stat_heap_size - Bsize_wsize (caml_fl_cur_size);
    target_size = (live / 100 + 1) * (100 + caml_percent_free);
    target_size = caml_round_heap_chunk_size (target_size);
    if (target_size < caml_stat_heap_size / 2) {
        char *chunk;

        /* round it up to a page size */
        chunk = caml_alloc_for_heap (target_size);
        if (chunk == NULL) return;
        caml_make_free_blocks ((value *) chunk,
                               Wsize_bsize (Chunk_size (chunk)), 0);
        if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0) {
            caml_free_for_heap (chunk);
            return;
        }
        Chunk_next (chunk) = caml_heap_start;
        caml_heap_start = chunk;
        ++ caml_stat_heap_chunks;
        caml_stat_heap_size += Chunk_size (chunk);
        if (caml_stat_heap_size > caml_stat_top_heap_size) {
            caml_stat_top_heap_size = caml_stat_heap_size;
        }
        do_compaction ();
        Assert (caml_stat_heap_chunks == 1);
        Assert (Chunk_next (caml_heap_start) == NULL);
    }
}
Example #14
static void* large_allocate(struct caml_heap_state* local, mlsize_t sz) {
  large_alloc* a = malloc(sz + LARGE_ALLOC_HEADER_SZ);
  if (!a) caml_raise_out_of_memory();
  local->stats.large_words += Wsize_bsize(sz + LARGE_ALLOC_HEADER_SZ);
  if (local->stats.large_words > local->stats.large_max_words)
    local->stats.large_max_words = local->stats.large_words;
  local->stats.large_blocks++;
  a->owner = local->owner;
  a->next = local->swept_large;
  local->swept_large = a;
  return (char*)a + LARGE_ALLOC_HEADER_SZ;
}
Example #15
/* Remove the heap chunk [chunk] from the heap and give the memory back
   to [free].
*/
void caml_shrink_heap (char *chunk)
{
  char **cp;

  /* Never deallocate the first chunk, because caml_heap_start is both the
     first block and the base address for page numbers, and we don't
     want to shift the page table, it's too messy (see above).
     It will never happen anyway, because of the way compaction works.
     (see compact.c)
     XXX FIXME this has become false with the fix to PR#5389 (see compact.c)
  */
  if (chunk == caml_heap_start) return;

  caml_stat_heap_wsz -= Wsize_bsize (Chunk_size (chunk));
  caml_gc_message (0x04, "Shrinking heap to %luk words\n",
                   (unsigned long) caml_stat_heap_wsz / 1024);

#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < Wsize_bsize (Chunk_size (chunk)); i++){
      ((value *) chunk) [i] = Debug_free_shrink;
    }
  }
#endif

  -- caml_stat_heap_chunks;

  /* Remove [chunk] from the list of chunks. */
  cp = &caml_heap_start;
  while (*cp != chunk) cp = &(Chunk_next (*cp));
  *cp = Chunk_next (chunk);

  /* Remove the pages of [chunk] from the page table. */
  caml_page_table_remove(In_heap, chunk, chunk + Chunk_size (chunk));

  /* Free the [malloc] block that contains [chunk]. */
  caml_free_for_heap (chunk);
}
Example #16
/* Make sure the minor heap is empty by performing a minor collection
   if needed.
*/
void caml_empty_minor_heap (void)
{
    value **r;
    uintnat prev_alloc_words;

    if (caml_young_ptr != caml_young_end) {
        if (caml_minor_gc_begin_hook != NULL) (*caml_minor_gc_begin_hook) ();
        prev_alloc_words = caml_allocated_words;
        caml_in_minor_collection = 1;
        caml_gc_message (0x02, "<", 0);
        caml_oldify_local_roots();
        for (r = caml_ref_table.base; r < caml_ref_table.ptr; r++) {
            caml_oldify_one (**r, *r);
        }
        caml_oldify_mopup ();
        for (r = caml_weak_ref_table.base; r < caml_weak_ref_table.ptr; r++) {
            if (Is_block (**r) && Is_young (**r)) {
                if (Hd_val (**r) == 0) {
                    **r = Field (**r, 0);
                } else {
                    **r = caml_weak_none;
                }
            }
        }
        if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start;
        caml_stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr);
        caml_young_ptr = caml_young_end;
        caml_young_limit = caml_young_start;
        clear_table (&caml_ref_table);
        clear_table (&caml_weak_ref_table);
        caml_gc_message (0x02, ">", 0);
        caml_in_minor_collection = 0;
        caml_stat_promoted_words += caml_allocated_words - prev_alloc_words;
        ++ caml_stat_minor_collections;
        caml_final_empty_young ();
        if (caml_minor_gc_end_hook != NULL) (*caml_minor_gc_end_hook) ();
    } else {
        caml_final_empty_young ();
    }
#ifdef DEBUG
    {
        value *p;
        for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p) {
            *p = Debug_free_minor;
        }
        ++ minor_gc_counter;
    }
#endif
}
Example #17
static void test_and_compact (void)
{
    float fp;

    fp = 100.0 * caml_fl_cur_size
         / (Wsize_bsize (caml_stat_heap_size) - caml_fl_cur_size);
    if (fp > 1000000.0) fp = 1000000.0;
    caml_gc_message (0x200, "Estimated overhead (lower bound) = %"
                     ARCH_INTNAT_PRINTF_FORMAT "u%%\n",
                     (uintnat) fp);
    if (fp >= caml_percent_max && caml_stat_heap_chunks > 1) {
        caml_gc_message (0x200, "Automatic compaction triggered.\n", 0);
        caml_compact_heap ();
    }
}
Example #18
static intnat large_alloc_sweep(struct caml_heap_state* local) {
  large_alloc* a = local->unswept_large;
  if (!a) return 0;
  local->unswept_large = a->next;
  header_t hd = *(header_t*)((char*)a + LARGE_ALLOC_HEADER_SZ);
  if (Has_status_hd(hd, global.GARBAGE)) {
    local->stats.large_words -=
      Whsize_hd(hd) + Wsize_bsize(LARGE_ALLOC_HEADER_SZ);
    local->stats.large_blocks--;
    free(a);
  } else {
    a->next = local->swept_large;
    local->swept_large = a;
  }
  return Whsize_hd(hd);
}
Example #19
CAMLprim value caml_gc_counters(value v)
{
    CAMLparam0 ();   /* v is ignored */
    CAMLlocal1 (res);

    /* get a copy of these before allocating anything... */
    double minwords = caml_stat_minor_words
                      + (double) Wsize_bsize (caml_young_end - caml_young_ptr);
    double prowords = caml_stat_promoted_words;
    double majwords = caml_stat_major_words + (double) caml_allocated_words;

    res = caml_alloc_tuple (3);
    Store_field (res, 0, caml_copy_double (minwords));
    Store_field (res, 1, caml_copy_double (prowords));
    Store_field (res, 2, caml_copy_double (majwords));
    CAMLreturn (res);
}
Example #20
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks (new_block);
    hp = caml_fl_allocate (wosize);
  }

  Assert (Is_in_heap (Val_hp (hp)));

  /* Inline expansion of caml_allocation_color. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > Wsize_bsize (caml_minor_heap_size)){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    uintnat i;
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
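A hedged sketch of the typical client side of caml_alloc_shr: a block allocated directly in the major heap starts out with uninitialized fields (Debug_uninit_major in debug builds, as the loop above shows), so the fields must be filled with caml_initialize rather than Store_field, which would read the garbage old value. make_pair_shr is a hypothetical name.

value make_pair_shr (value a, value b)
{
  CAMLparam2 (a, b);
  CAMLlocal1 (res);
  res = caml_alloc_shr (2, 0);            /* 2-field block, tag 0, in the major heap */
  caml_initialize (&Field (res, 0), a);   /* initialize, don't Store_field: the old  */
  caml_initialize (&Field (res, 1), b);   /* field contents are uninitialized        */
  CAMLreturn (res);
}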
Example #21
CAMLprim value caml_gc_counters(value v)
{
  CAMLparam0 ();   /* v is ignored */
  CAMLlocal1 (res);

  /* get a copy of these before allocating anything... */
  double minwords = Caml_state->stat_minor_words
                    + ((double) Wsize_bsize (Caml_state->young_end -
                                            Caml_state->young_ptr)) / sizeof(value);
  double prowords = Caml_state->stat_promoted_words;
  double majwords = Caml_state->stat_major_words + (double) Caml_state->allocated_words;

  res = caml_alloc_3(0,
    caml_copy_double (minwords),
    caml_copy_double (prowords),
    caml_copy_double (majwords));
  CAMLreturn (res);
}
Example #22
static uintnat default_slice_budget() {
  /*
     Free memory at the start of the GC cycle (garbage + free list) (assumed):
                 FM = caml_stat_heap_size * caml_percent_free
                      / (100 + caml_percent_free)

     Assuming steady state and enforcing a constant allocation rate, then
     FM is divided in 2/3 for garbage and 1/3 for free list.
                 G = 2 * FM / 3
     G is also the amount of memory that will be used during this cycle
     (still assuming steady state).

     Proportion of G consumed since the previous slice:
                 PH = caml_domain_state->allocated_words / G
                    = caml_domain_state->allocated_words * 3 * (100 + caml_percent_free)
                      / (2 * caml_stat_heap_size * caml_percent_free)
     Proportion of extra-heap resources consumed since the previous slice:
                 PE = caml_extra_heap_resources
     Proportion of total work to do in this slice:
                 P  = max (PH, PE)
     Amount of marking work for the GC cycle:
                 MW = caml_stat_heap_size * 100 / (100 + caml_percent_free)
     Amount of sweeping work for the GC cycle:
                 SW = caml_stat_heap_size

     Total amount of work for the GC cycle:
                 TW = MW + SW

     Amount of work to do for this slice:
                 W = P * TW
  */
  uintnat heap_size = caml_heap_size(caml_domain_self()->shared_heap);
  double heap_words = (double)Wsize_bsize(heap_size);
  double p = (double) caml_domain_state->allocated_words * 3.0 * (100 + caml_percent_free)
      / heap_words / caml_percent_free / 2.0;

  double total_work =
    heap_words * 100 / (100 + caml_percent_free) /* marking */
    + heap_words; /* sweeping */

  return (intnat)(p * total_work);
  //return 1ll << 50;
}
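Plugging in the stock space_overhead of 80 for caml_percent_free (an assumption; this runtime's default may differ) makes the budget easier to read:

  p          = allocated_words * 3 * 180 / (heap_words * 80 * 2)  =  3.375 * allocated_words / heap_words
  total_work = heap_words * 100 / 180 + heap_words                ≈  1.56 * heap_words
  budget     = p * total_work                                     ≈  5.25 * allocated_words

so each slice is budgeted roughly five words of marking-plus-sweeping work per word allocated since the previous slice.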
Example #23
/* Remove the heap chunk [chunk] from the heap and give the memory back
   to [free].
*/
void caml_shrink_heap (char *chunk)
{
  char **cp;
  asize_t i;

  /* Never deallocate the first block, because caml_heap_start is both the
     first block and the base address for page numbers, and we don't
     want to shift the page table, it's too messy (see above).
     It will never happen anyway, because of the way compaction works.
     (see compact.c)
  */
  if (chunk == caml_heap_start) return;

  caml_stat_heap_size -= Chunk_size (chunk);
  caml_gc_message (0x04, "Shrinking heap to %luk bytes\n",
                   caml_stat_heap_size / 1024);

#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < Wsize_bsize (Chunk_size (chunk)); i++){
      ((value *) chunk) [i] = Debug_free_shrink;
    }
  }
#endif

  -- caml_stat_heap_chunks;

  /* Remove [chunk] from the list of chunks. */
  cp = &caml_heap_start;
  while (*cp != chunk) cp = &(Chunk_next (*cp));
  *cp = Chunk_next (chunk);

  /* Remove the pages of [chunk] from the page table. */
  for (i = Page (chunk); i < Page (chunk + Chunk_size (chunk)); i++){
    caml_page_table [i] = Not_in_heap;
  }

  /* Free the [malloc] block that contains [chunk]. */
  caml_free_for_heap (chunk);
}
Example #24
/* Check that [v]'s header looks good.  [v] must be a block in the heap. */
static void check_head (value v)
{
  Assert (Is_block (v));
  Assert (Is_in_heap (v));

  Assert (Wosize_val (v) != 0);
  Assert (Color_hd (Hd_val (v)) != Caml_blue);
  Assert (Is_in_heap (v));
  if (Tag_val (v) == Infix_tag){
    int offset = Wsize_bsize (Infix_offset_val (v));
    value trueval = Val_op (&Field (v, -offset));
    Assert (Tag_val (trueval) == Closure_tag);
    Assert (Wosize_val (trueval) > offset);
    Assert (Is_in_heap (&Field (trueval, Wosize_val (trueval) - 1)));
  }else{
    Assert (Is_in_heap (&Field (v, Wosize_val (v) - 1)));
  }
  if (Tag_val (v) ==  Double_tag){
    Assert (Wosize_val (v) == Double_wosize);
  }else if (Tag_val (v) == Double_array_tag){
    Assert (Wosize_val (v) % Double_wosize == 0);
  }
}
Example #25
EXTERN value alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  hp = fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) raise_out_of_memory ();
    fl_add_block (new_block);
    hp = fl_allocate (wosize);
    if (hp == NULL) fatal_error ("alloc_shr: expand heap failed\n");
  }

  Assert (Is_in_heap (Val_hp (hp)));

  if (gc_phase == Phase_mark || (addr)hp >= (addr)gc_sweep_hp){
    Hd_hp (hp) = Make_header (wosize, tag, Black);
  }else{
    Hd_hp (hp) = Make_header (wosize, tag, White);
  }
  allocated_words += Whsize_wosize (wosize);
  if (allocated_words > Wsize_bsize (minor_heap_size)) force_minor_gc ();
  return Val_hp (hp);
}
Example #26
int netsys_init_value_1(struct htab *t,
			struct nqueue *q,
			char *dest,
			char *dest_end,
			value orig,  
			int enable_bigarrays, 
			int enable_customs,
			int enable_atoms,
			int simulation,
			void *target_addr,
			struct named_custom_ops *target_custom_ops,
			int color,
			intnat *start_offset,
			intnat *bytelen
			)
{
    void *orig_addr;
    void *work_addr;
    value work;
    int   work_tag;
    char *work_header;
    size_t work_bytes;
    size_t work_words;
    void *copy_addr;
    value copy;
    char *copy_header;
    header_t copy_header1;
    int   copy_tag;
    size_t copy_words;
    void *fixup_addr;
    char *dest_cur;
    char *dest_ptr;
    int code, i;
    intnat addr_delta;
    struct named_custom_ops *ops_ptr;
    void *int32_target_ops;
    void *int64_target_ops;
    void *nativeint_target_ops;
    void *bigarray_target_ops;

    copy = 0;

    dest_cur = dest;
    addr_delta = ((char *) target_addr) - dest;

    if (dest_cur >= dest_end && !simulation) return (-4);   /* out of space */

    if (!Is_block(orig)) return (-2);

    orig_addr = (void *) orig;
    code = netsys_queue_add(q, orig_addr);
    if (code != 0) return code;

    /* initialize *_target_ops */
    bigarray_target_ops = NULL;
    int32_target_ops = NULL;
    int64_target_ops = NULL;
    nativeint_target_ops = NULL;
    ops_ptr = target_custom_ops;
    while (ops_ptr != NULL) {
	if (strcmp(ops_ptr->name, "_bigarray") == 0)
	    bigarray_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_i") == 0)
	    int32_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_j") == 0)
	    int64_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_n") == 0)
	    nativeint_target_ops = ops_ptr->ops;
	ops_ptr = ops_ptr->next;
    };

    /* First pass: Iterate over the addresses found in q. Ignore
       addresses already seen in the past (which are in t). For
       new addresses, make a copy, and add these copies to t.
    */

    /* fprintf(stderr, "first pass, orig_addr=%lx simulation=%d addr_delta=%lx\n",
       (unsigned long) orig_addr, simulation, addr_delta);
    */

    code = netsys_queue_take(q, &work_addr);
    while (code != (-3)) {
	if (code != 0) return code;

	/* fprintf(stderr, "work_addr=%lx\n", (unsigned long) work_addr); */

	code = netsys_htab_lookup(t, work_addr, &copy_addr);
	if (code != 0) return code;

	if (copy_addr == NULL) {
	    /* The address is unknown, so copy the value */

	    /* Body of first pass */
	    work = (value) work_addr;
	    work_tag = Tag_val(work);
	    work_header = Hp_val(work);
	    
	    if (work_tag < No_scan_tag) {
		/* It is a scanned value (with subvalues) */
		
		switch(work_tag) {
		case Object_tag:
		case Closure_tag:
		case Lazy_tag:
		case Forward_tag:
		    return (-2);   /* unsupported */
		}

		work_words = Wosize_hp(work_header);
		if (work_words == 0) {
		    if (!enable_atoms) return (-2);
		    if (enable_atoms == 1) goto next;
		};
		
		/* Do the copy. */

		work_bytes = Bhsize_hp(work_header);
		copy_header = dest_cur;
		dest_cur += work_bytes;
		if (dest_cur > dest_end && !simulation) return (-4);
		
		if (simulation) 
		    copy_addr = work_addr;
		else {
		    memcpy(copy_header, work_header, work_bytes);
		    copy = Val_hp(copy_header);
		    copy_addr = (void *) copy;
		    Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
		}

		/* Add the association (work_addr -> copy_addr) to t: */

		code = netsys_htab_add(t, work_addr, copy_addr);
		if (code < 0) return code;

		/* Add the sub values of work_addr to q: */

		for (i=0; i < work_words; ++i) {
		    value field = Field(work, i);
		    if (Is_block (field)) {
			code = netsys_queue_add(q, (void *) field);
			if (code != 0) return code;
		    }
		}
	    }
	    else {
		/* It is an opaque value */
		int do_copy = 0;
		int do_bigarray = 0;
		void *target_ops = NULL;
		char caml_id = ' ';  /* only b, i, j, n */
		/* Check for bigarrays and other custom blocks */
		switch (work_tag) {
		case Abstract_tag:
		    return(-2);
		case String_tag:
		    do_copy = 1; break;
		case Double_tag:
		    do_copy = 1; break;
		case Double_array_tag:
		    do_copy = 1; break;
		case Custom_tag: 
		    {
			struct custom_operations *custom_ops;
			char *id;

			custom_ops = Custom_ops_val(work);
			id = custom_ops->identifier;
			if (id[0] == '_') {
			    switch (id[1]) {
			    case 'b':
				if (!enable_bigarrays) return (-2);
				if (strcmp(id, "_bigarray") == 0) {
				    caml_id = 'b';
				    break;
				}
			    case 'i': /* int32 */
			    case 'j': /* int64 */
			    case 'n': /* nativeint */
				if (!enable_customs) return (-2);
				if (id[2] == 0) {
				    caml_id = id[1];
				    break;
				}
			    default:
				return (-2);
			    }
			}
			else
			    return (-2);
		    }
		}; /* switch */

		switch (caml_id) {  /* look closer at some cases */
		case 'b': {
		    target_ops = bigarray_target_ops;
		    do_copy = 1;
		    do_bigarray = 1;
		    break;
		}
		case 'i':
		    target_ops = int32_target_ops; do_copy = 1; break;
		case 'j':
		    target_ops = int64_target_ops; do_copy = 1; break;
		case 'n':
		    target_ops = nativeint_target_ops; do_copy = 1; break;
		};

		if (do_copy) {  
		    /* Copy the value */
		    work_bytes = Bhsize_hp(work_header);
		    copy_header = dest_cur;
		    dest_cur += work_bytes;

		    if (simulation)
			copy_addr = work_addr;
		    else {
			if (dest_cur > dest_end) return (-4);
			memcpy(copy_header, work_header, work_bytes);
			copy = Val_hp(copy_header);
			copy_addr = (void *) copy;
			Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
			if (target_ops != NULL)
			    Custom_ops_val(copy) = target_ops;
		    }
		    
		    code = netsys_htab_add(t, work_addr, copy_addr);
		    if (code < 0) return code;
		}

		if (do_bigarray) {
		    /* postprocessing for copying bigarrays */
		    struct caml_ba_array *b_work, *b_copy;
		    void * data_copy;
		    char * data_header;
		    header_t data_header1;
		    size_t size = 1;
		    size_t size_aligned;
		    size_t size_words;
		    b_work = Bigarray_val(work);
		    b_copy = Bigarray_val(copy);
		    for (i = 0; i < b_work->num_dims; i++) {
			size = size * b_work->dim[i];
		    };
		    size = 
			size * 
			caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

		    size_aligned = size;
		    if (size%sizeof(void *) != 0)
			size_aligned += sizeof(void *) - (size%sizeof(void *));
		    size_words = Wsize_bsize(size_aligned);

		    /* If we put the copy of the bigarray into our own
		       dest buffer, also generate an abstract header,
		       so it can be skipped when iterating over it.

		       We use here a special representation, so we can
		       encode any length in this header (with a normal
		       Ocaml header we are limited by Max_wosize, e.g.
		       16M on 32 bit systems). The special representation
		       is an Abstract_tag with zero length, followed
		       by the real length (in words)
		    */
		    
		    if (enable_bigarrays == 2) {
			data_header = dest_cur;
			dest_cur += 2*sizeof(void *);
			data_copy = dest_cur;
			dest_cur += size_aligned;
		    } else if (!simulation) {
			data_header = NULL;
			data_copy = stat_alloc(size_aligned);
		    };

		    if (!simulation) {
			if (dest_cur > dest_end) return (-4);

			/* Initialize header: */
			
			if (data_header != NULL) {
			    data_header1 = Abstract_tag;
			    memcpy(data_header, 
				   (char *) &data_header1,
				   sizeof(header_t));
			    memcpy(data_header + sizeof(header_t),
				   (size_t *) &size_words,
				   sizeof(size_t));
			};

			/* Copy bigarray: */
			
			memcpy(data_copy, b_work->data, size);
			b_copy->data = data_copy;
			b_copy->proxy = NULL;

			/* If the copy is in our own buffer, it is
			   now externally managed.
			*/
			b_copy->flags = 
			    (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
			    (enable_bigarrays == 2 ? 
			     CAML_BA_EXTERNAL :
			     CAML_BA_MANAGED);
		    }
		}

	    } /* if (work_tag < No_scan_tag) */
	} /* if (copy_addr == NULL) */

	/* Switch to next address in q: */
    next:
	code = netsys_queue_take(q, &work_addr);
    } /* while */
    
    /* Second pass. The copied blocks still have fields pointing to the
       original blocks. We fix that now by iterating once over the copied
       memory block.
    */

    if (!simulation) {
	/* fprintf(stderr, "second pass\n"); */
	dest_ptr = dest;
	while (dest_ptr < dest_cur) {
	    copy_header1 = *((header_t *) dest_ptr);
	    copy_tag = Tag_hd(copy_header1);
	    copy_words = Wosize_hd(copy_header1);
	    copy = (value) (dest_ptr + sizeof(void *));
	    
	    if (copy_tag < No_scan_tag) {
		for (i=0; i < copy_words; ++i) {
		    value field = Field(copy, i);
		    if (Is_block (field)) {
			/* It is a pointer. Try to fix it up. */
			code = netsys_htab_lookup(t, (void *) field,
						  &fixup_addr);
			if (code != 0) return code;

			if (fixup_addr != NULL)
			    Field(copy,i) = 
				(value) (((char *) fixup_addr) + addr_delta);
		    }
		}
	    }
	    else if (copy_tag == Abstract_tag && copy_words == 0) {
		/* our special representation for skipping data regions */
		copy_words = ((size_t *) dest_ptr)[1] + 1;
	    };
	    
	    dest_ptr += (copy_words + 1) * sizeof(void *);
	}
    }	

    /* hey, fine. Return result */
    *start_offset = sizeof(void *);
    *bytelen = dest_cur - dest;

    /* fprintf(stderr, "return regularly\n");*/

    return 0;
}
Example #27
/* Make sure the minor heap is empty by performing a minor collection if
 * needed. */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes = caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  caml_save_stack_gc();

  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }

    for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }

    caml_oldify_mopup ();

    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          vnew = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) { Assert(Tag_val(vnew) == Infix_tag); }
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }

    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion, unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, %u pointers rewritten",
                 (unsigned)stat_live_bytes/1024, (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);

  caml_restore_stack_gc();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
Example #28
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
                                          Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{                                      Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }


  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }


  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;                             Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }                    Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{                                        Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:       Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header:
             the object was free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }


  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)), 1,
                                 Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
Example #29
void caml_compact_heap_r (CAML_R)
{
  uintnat target_words, target_size, live;

  do_compaction_r (ctx);
  /* Compaction may fail to shrink the heap to a reasonable size
     because it deals in complete chunks: if a very large chunk
     is at the beginning of the heap, everything gets moved to
     it and it is not freed.

     In that case, we allocate a new chunk of the desired heap
     size, chain it at the beginning of the heap (thus pretending
     its address is smaller), and launch a second compaction.
     This will move all data to this new chunk and free the
     very large chunk.

     See PR#5389
  */
  /* We compute:
     freewords = caml_fl_cur_size                  (exact)
     heapwords = Wsize_bsize (caml_heap_size)      (exact)
     live = heapwords - freewords
     wanted = caml_percent_free * (live / 100 + 1) (same as in do_compaction)
     target_words = live + wanted
     We add one page to make sure a small difference in counting sizes
     won't make [do_compaction] keep the second block (and break all sorts
     of invariants).

     We recompact if target_size < heap_size / 2
  */
  live = Wsize_bsize (caml_stat_heap_size) - caml_fl_cur_size;
  target_words = live + caml_percent_free * (live / 100 + 1)
                 + Wsize_bsize (Page_size);
  target_size = caml_round_heap_chunk_size_r (ctx, Bsize_wsize (target_words));
  if (target_size < caml_stat_heap_size / 2){
    char *chunk;

    caml_gc_message (0x10, "Recompacting heap (target=%luk)\n",
                     target_size / 1024);

    chunk = caml_alloc_for_heap (target_size);
    if (chunk == NULL) return;
    /* PR#5757: we need to make the new blocks blue, or they won't be
       recognized as free by the recompaction. */
    caml_make_free_blocks_r (ctx, (value *) chunk,
                           Wsize_bsize (Chunk_size (chunk)), 0, Caml_blue);
    if (caml_page_table_add_r (ctx, In_heap, chunk, chunk + Chunk_size (chunk)) != 0){
      caml_free_for_heap (chunk);
      return;
    }
    Chunk_next (chunk) = caml_heap_start;
    caml_heap_start = chunk;
    ++ caml_stat_heap_chunks;
    caml_stat_heap_size += Chunk_size (chunk);
    if (caml_stat_heap_size > caml_stat_top_heap_size){
      caml_stat_top_heap_size = caml_stat_heap_size;
    }
    do_compaction_r (ctx);
    Assert (caml_stat_heap_chunks == 1);
    Assert (Chunk_next (caml_heap_start) == NULL);
    Assert (caml_stat_heap_size == Chunk_size (chunk));
  }
}
Example #30
/* Check the heap structure (if compiled in debug mode) and
   gather statistics; return the stats if [returnstats] is true,
   otherwise return [Val_unit].
*/
static value heap_stats (int returnstats)
{
    CAMLparam0 ();
    intnat live_words = 0, live_blocks = 0,
           free_words = 0, free_blocks = 0, largest_free = 0,
           fragments = 0, heap_chunks = 0;
    char *chunk = caml_heap_start, *chunk_end;
    char *cur_hp/*, *prev_hp*/;
    header_t cur_hd;

#ifdef DEBUG
    caml_gc_message (-1, "### O'Caml runtime: heap check ###\n", 0);
#endif

    while (chunk != NULL) {
        ++ heap_chunks;
        chunk_end = chunk + Chunk_size (chunk);
        /*prev_hp = NULL;*/
        cur_hp = chunk;
        while (cur_hp < chunk_end) {
            cur_hd = Hd_hp (cur_hp);
            Assert (Next (cur_hp) <= chunk_end);
            switch (Color_hd (cur_hd)) {
            case Caml_white:
                if (Wosize_hd (cur_hd) == 0) {
                    ++ fragments;
                    Assert (/*prev_hp == NULL
                  ||*/ Color_hp (prev_hp) != Caml_blue
                        || cur_hp == caml_gc_sweep_hp);
                } else {
                    if (caml_gc_phase == Phase_sweep && cur_hp >= caml_gc_sweep_hp) {
                        ++ free_blocks;
                        free_words += Whsize_hd (cur_hd);
                        if (Whsize_hd (cur_hd) > largest_free) {
                            largest_free = Whsize_hd (cur_hd);
                        }
                    } else {
                        ++ live_blocks;
                        live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
                        check_block (cur_hp);
#endif
                    }
                }
                break;
            case Caml_gray:
            case Caml_black:
                Assert (Wosize_hd (cur_hd) > 0);
                ++ live_blocks;
                live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
                check_block (cur_hp);
#endif
                break;
            case Caml_blue:
                Assert (Wosize_hd (cur_hd) > 0);
                ++ free_blocks;
                free_words += Whsize_hd (cur_hd);
                if (Whsize_hd (cur_hd) > largest_free) {
                    largest_free = Whsize_hd (cur_hd);
                }
                /* not true any more with big heap chunks
                Assert (prev_hp == NULL
                        || (Color_hp (prev_hp) != Caml_blue && Wosize_hp (prev_hp) > 0)
                        || cur_hp == caml_gc_sweep_hp);
                Assert (Next (cur_hp) == chunk_end
                        || (Color_hp (Next (cur_hp)) != Caml_blue
                            && Wosize_hp (Next (cur_hp)) > 0)
                        || (Whsize_hd (cur_hd) + Wosize_hp (Next (cur_hp)) > Max_wosize)
                        || Next (cur_hp) == caml_gc_sweep_hp);
                */
                break;
            }
            /*prev_hp = cur_hp;*/
            cur_hp = Next (cur_hp);
        }
        Assert (cur_hp == chunk_end);
        chunk = Chunk_next (chunk);
    }

    Assert (heap_chunks == caml_stat_heap_chunks);
    Assert (live_words + free_words + fragments
            == Wsize_bsize (caml_stat_heap_size));

    if (returnstats) {
        CAMLlocal1 (res);

        /* get a copy of these before allocating anything... */
        double minwords = caml_stat_minor_words
                          + (double) Wsize_bsize (caml_young_end - caml_young_ptr);
        double prowords = caml_stat_promoted_words;
        double majwords = caml_stat_major_words + (double) caml_allocated_words;
        intnat mincoll = caml_stat_minor_collections;
        intnat majcoll = caml_stat_major_collections;
        intnat heap_words = Wsize_bsize (caml_stat_heap_size);
        intnat cpct = caml_stat_compactions;
        intnat top_heap_words = Wsize_bsize (caml_stat_top_heap_size);

        res = caml_alloc_tuple (15);
        Store_field (res, 0, caml_copy_double (minwords));
        Store_field (res, 1, caml_copy_double (prowords));
        Store_field (res, 2, caml_copy_double (majwords));
        Store_field (res, 3, Val_long (mincoll));
        Store_field (res, 4, Val_long (majcoll));
        Store_field (res, 5, Val_long (heap_words));
        Store_field (res, 6, Val_long (heap_chunks));
        Store_field (res, 7, Val_long (live_words));
        Store_field (res, 8, Val_long (live_blocks));
        Store_field (res, 9, Val_long (free_words));
        Store_field (res, 10, Val_long (free_blocks));
        Store_field (res, 11, Val_long (largest_free));
        Store_field (res, 12, Val_long (fragments));
        Store_field (res, 13, Val_long (cpct));
        Store_field (res, 14, Val_long (top_heap_words));
        CAMLreturn (res);
    } else {
        CAMLreturn (Val_unit);
    }
}