Example #1
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(m)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.

   See also: caml_compact_heap, which duplicates most of this function.
*/
int caml_add_to_heap (char *m)
{
                                     Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (caml_stat_heap_size + Chunk_size (m)) / 1024);

  /* Register block in page table */
  if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
    return -1;

  /* Chain this heap chunk. */
  {
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  caml_stat_heap_size += Chunk_size (m);
  if (caml_stat_heap_size > caml_stat_top_heap_size){
    caml_stat_top_heap_size = caml_stat_heap_size;
  }
  return 0;
}
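
The chaining loop above is worth isolating: the chunk list is kept sorted by address, and the pointer-to-pointer cursor (char **last) lets the insertion update either caml_heap_start or a Chunk_next field without special-casing the list head. A minimal self-contained sketch, with a toy struct chunk standing in for the runtime's header macros:

#include <stdio.h>
#include <stddef.h>

struct chunk { struct chunk *next; };

static struct chunk *heap_start = NULL;   /* plays caml_heap_start */

static void add_chunk (struct chunk *m)
{
  struct chunk **last = &heap_start;
  struct chunk *cur = *last;

  while (cur != NULL && cur < m){   /* find the first chunk above [m] */
    last = &cur->next;
    cur = *last;
  }
  m->next = cur;                    /* splice [m] in front of it */
  *last = m;
}

int main (void)
{
  struct chunk a, b, c;
  add_chunk (&b); add_chunk (&a); add_chunk (&c);
  for (struct chunk *p = heap_start; p != NULL; p = p->next)
    printf ("%p\n", (void *) p);    /* prints the three chunks in address order */
  return 0;
}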
Example #2
void caml_compact_heap (void)
{
    uintnat target_size, live;

    do_compaction ();
    /* Compaction may fail to shrink the heap to a reasonable size
       because it deals in complete chunks: if a very large chunk
       is at the beginning of the heap, everything gets moved to
       it and it is not freed.

       In that case, we allocate a new chunk of the desired heap
       size, chain it at the beginning of the heap (thus pretending
       its address is smaller), and launch a second compaction.
       This will move all data to this new chunk and free the
       very large chunk.

       See PR#5389
    */
    /* We compute:
       freewords = caml_fl_cur_size          (exact)
       heapsize = caml_heap_size             (exact)
       live = heap_size - freewords
       target_size = live * (1 + caml_percent_free / 100)
                   = live / 100 * (100 + caml_percent_free)
       We add 1 to live/100 to make sure it isn't 0.

       We recompact if target_size < heap_size / 2
    */
    live = caml_stat_heap_size - Bsize_wsize (caml_fl_cur_size);
    target_size = (live / 100 + 1) * (100 + caml_percent_free);
    target_size = caml_round_heap_chunk_size (target_size);
    if (target_size < caml_stat_heap_size / 2) {
        char *chunk;

        /* round it up to a page size */
        chunk = caml_alloc_for_heap (target_size);
        if (chunk == NULL) return;
        caml_make_free_blocks ((value *) chunk,
                               Wsize_bsize (Chunk_size (chunk)), 0);
        if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0) {
            caml_free_for_heap (chunk);
            return;
        }
        Chunk_next (chunk) = caml_heap_start;
        caml_heap_start = chunk;
        ++ caml_stat_heap_chunks;
        caml_stat_heap_size += Chunk_size (chunk);
        if (caml_stat_heap_size > caml_stat_top_heap_size) {
            caml_stat_top_heap_size = caml_stat_heap_size;
        }
        do_compaction ();
        Assert (caml_stat_heap_chunks == 1);
        Assert (Chunk_next (caml_heap_start) == NULL);
    }
}
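
To make the recompaction threshold concrete, here is a standalone rendering of the target_size computation with made-up numbers (an 8 MB live set in a 64 MB heap, caml_percent_free = 80); the chunk-rounding step is omitted:

#include <stdio.h>

int main (void)
{
  unsigned long heap_size = 64UL << 20;   /* bytes, plays caml_stat_heap_size */
  unsigned long live = 8UL << 20;         /* heap_size - freewords */
  unsigned long percent_free = 80;

  /* live / 100 + 1: the +1 guards against a zero multiplicand on tiny heaps */
  unsigned long target_size = (live / 100 + 1) * (100 + percent_free);

  printf ("target = %lu bytes, heap/2 = %lu bytes, recompact = %d\n",
          target_size, heap_size / 2, target_size < heap_size / 2);
  return 0;
}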
Example #3
/* Use this function to free a block allocated with [caml_alloc_for_heap]
   if you don't add it with [caml_add_to_heap].
*/
void caml_free_for_heap (char *mem)
{
#ifdef USE_MMAP_INSTEAD_OF_MALLOC
  caml_aligned_munmap (Chunk_block (mem),
                       Chunk_size (mem) + sizeof (heap_chunk_head));
#else
  free (Chunk_block (mem));
#endif
}
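
A minimal sketch of the intended life cycle, assuming the runtime's internal declarations are in scope; grow_heap_by_one_chunk is a hypothetical caller, not a runtime function:

/* Not a standalone program: assumes the runtime's declarations of
   caml_alloc_for_heap, caml_add_to_heap, caml_free_for_heap and asize_t. */
void grow_heap_by_one_chunk (asize_t size)   /* size: multiple of Page_size */
{
  char *chunk = caml_alloc_for_heap (size);
  if (chunk == NULL) return;                 /* allocation failed */
  /* ... initialize the chunk's contents as a sequence of valid blocks ... */
  if (caml_add_to_heap (chunk) != 0){
    caml_free_for_heap (chunk);              /* the case this comment is about */
  }
}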
Example #4
static char *compact_allocate_r (CAML_R, mlsize_t size)
                                      /* in bytes, including header */
{
  char *chunk, *adr;

  while (Chunk_size (compact_fl) - Chunk_alloc (compact_fl) <= Bhsize_wosize (3)
         && Chunk_size (Chunk_next (compact_fl))
            - Chunk_alloc (Chunk_next (compact_fl))
            <= Bhsize_wosize (3)){
    compact_fl = Chunk_next (compact_fl);
  }
  chunk = compact_fl;
  while (Chunk_size (chunk) - Chunk_alloc (chunk) < size){
    chunk = Chunk_next (chunk);                         Assert (chunk != NULL);
  }
  adr = chunk + Chunk_alloc (chunk);
  Chunk_alloc (chunk) += size;
  return adr;
}
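
The allocator above is a plain bump allocator over pre-sized arenas: Chunk_alloc (chunk) records how many bytes of the chunk are already used, and an allocation just advances that cursor. A toy self-contained version, with made-up names and sizes:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct arena { char data[256]; size_t alloc; };   /* alloc plays Chunk_alloc */

static char *arena_alloc (struct arena *a, size_t size)
{
  assert (sizeof a->data - a->alloc >= size);     /* caller checked for room */
  char *adr = a->data + a->alloc;
  a->alloc += size;                               /* bump the cursor */
  return adr;
}

int main (void)
{
  struct arena a = { .alloc = 0 };
  printf ("%p\n%p\n", (void *) arena_alloc (&a, 16),
                      (void *) arena_alloc (&a, 32));
  return 0;
}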
Example #5
/* Remove the heap chunk [chunk] from the heap and give the memory back
   to [free].
*/
void caml_shrink_heap (char *chunk)
{
  char **cp;
  asize_t i;

  /* Never deallocate the first block, because caml_heap_start is both the
     first block and the base address for page numbers, and we don't
     want to shift the page table, it's too messy (see above).
     It will never happen anyway, because of the way compaction works.
     (see compact.c)
  */
  if (chunk == caml_heap_start) return;

  caml_stat_heap_size -= Chunk_size (chunk);
  caml_gc_message (0x04, "Shrinking heap to %luk bytes\n",
                   caml_stat_heap_size / 1024);

#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < Wsize_bsize (Chunk_size (chunk)); i++){
      ((value *) chunk) [i] = Debug_free_shrink;
    }
  }
#endif

  -- caml_stat_heap_chunks;

  /* Remove [chunk] from the list of chunks. */
  cp = &caml_heap_start;
  while (*cp != chunk) cp = &(Chunk_next (*cp));
  *cp = Chunk_next (chunk);

  /* Remove the pages of [chunk] from the page table. */
  for (i = Page (chunk); i < Page (chunk + Chunk_size (chunk)); i++){
    caml_page_table [i] = Not_in_heap;
  }

  /* Free the [malloc] block that contains [chunk]. */
  caml_free_for_heap (chunk);
}
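
The unlink loop uses the same pointer-to-pointer idiom as the insertion in caml_add_to_heap: by walking with char **cp, the final assignment works whether the chunk is the list head or an interior element. A toy sketch:

#include <stdio.h>
#include <stddef.h>

struct chunk { struct chunk *next; };

static void remove_chunk (struct chunk **head, struct chunk *chunk)
{
  struct chunk **cp = head;
  while (*cp != chunk) cp = &((*cp)->next);   /* assumes [chunk] is present */
  *cp = chunk->next;                          /* unlink, head or interior */
}

int main (void)
{
  struct chunk c = { NULL }, b = { &c }, a = { &b };
  struct chunk *head = &a;
  remove_chunk (&head, &b);
  for (struct chunk *p = head; p != NULL; p = p->next)
    printf ("%p\n", (void *) p);              /* prints a then c */
  return 0;
}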
Example #6
/* Allocate a block of the requested size, to be passed to
   [caml_add_to_heap] later.
   [request] must be a multiple of [Page_size].
   [caml_alloc_for_heap] returns NULL if the request cannot be satisfied.
   The returned pointer is a hp, but the header must be initialized by
   the caller.
*/
char *caml_alloc_for_heap (asize_t request)
{
  char *mem;
  void *block;
                                              Assert (request % Page_size == 0);
  mem = caml_aligned_malloc (request + sizeof (heap_chunk_head),
                             sizeof (heap_chunk_head), &block);
  if (mem == NULL) return NULL;
  mem += sizeof (heap_chunk_head);
  Chunk_size (mem) = request;
  Chunk_block (mem) = block;
  return mem;
}
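
Chunk_size and Chunk_block read fields stored immediately before the returned pointer: caml_alloc_for_heap makes room for the header and hands back the address just past it. A self-contained sketch of that layout (the struct and macro names are illustrative, and the alignment handling of caml_aligned_malloc is omitted):

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *block; size_t size; } chunk_head;

#define CHUNK_HEAD(mem)  (((chunk_head *) (mem)) - 1)   /* negative offset */
#define CHUNK_SIZE(mem)  (CHUNK_HEAD (mem)->size)
#define CHUNK_BLOCK(mem) (CHUNK_HEAD (mem)->block)

static char *alloc_chunk (size_t request)
{
  void *block = malloc (request + sizeof (chunk_head));
  if (block == NULL) return NULL;
  char *mem = (char *) block + sizeof (chunk_head);
  CHUNK_SIZE (mem) = request;
  CHUNK_BLOCK (mem) = block;    /* remembered so free_chunk can undo malloc */
  return mem;
}

static void free_chunk (char *mem) { free (CHUNK_BLOCK (mem)); }

int main (void)
{
  char *mem = alloc_chunk (4096);
  if (mem == NULL) return 1;
  printf ("chunk of %zu bytes at %p\n", CHUNK_SIZE (mem), (void *) mem);
  free_chunk (mem);
  return 0;
}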
Example #7
/* Remove the heap chunk [chunk] from the heap and give the memory back
   to [free].
*/
void caml_shrink_heap (char *chunk)
{
  char **cp;

  /* Never deallocate the first chunk, because caml_heap_start is both the
     first block and the base address for page numbers, and we don't
     want to shift the page table, it's too messy (see above).
     It will never happen anyway, because of the way compaction works.
     (see compact.c)
     XXX FIXME this has become false with the fix to PR#5389 (see compact.c)
  */
  if (chunk == caml_heap_start) return;

  caml_stat_heap_wsz -= Wsize_bsize (Chunk_size (chunk));
  caml_gc_message (0x04, "Shrinking heap to %luk words\n",
                   (unsigned long) caml_stat_heap_wsz / 1024);

#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < Wsize_bsize (Chunk_size (chunk)); i++){
      ((value *) chunk) [i] = Debug_free_shrink;
    }
  }
#endif

  -- caml_stat_heap_chunks;

  /* Remove [chunk] from the list of chunks. */
  cp = &caml_heap_start;
  while (*cp != chunk) cp = &(Chunk_next (*cp));
  *cp = Chunk_next (chunk);

  /* Remove the pages of [chunk] from the page table. */
  caml_page_table_remove(In_heap, chunk, chunk + Chunk_size (chunk));

  /* Free the [malloc] block that contains [chunk]. */
  caml_free_for_heap (chunk);
}
Example #8
/* Allocate a block of the requested size, to be passed to
   [caml_add_to_heap] later.
   [request] must be a multiple of [Page_size].
   [caml_alloc_for_heap] returns NULL if the request cannot be satisfied.
   The returned pointer is a hp, but the header must be initialized by
   the caller.
*/
char *caml_alloc_for_heap (asize_t request)
{
  char *mem;
  void *block;
                                              Assert (request % Page_size == 0);
#ifdef USE_MMAP_INSTEAD_OF_MALLOC
  mem = caml_aligned_mmap (request + sizeof (heap_chunk_head),
                           sizeof (heap_chunk_head), &block);
#else
  mem = caml_aligned_malloc (request + sizeof (heap_chunk_head),
                             sizeof (heap_chunk_head), &block);
#endif
  if (mem == NULL) return NULL;
  mem += sizeof (heap_chunk_head);
  Chunk_size (mem) = request;
  Chunk_block (mem) = block;
  return mem;
}
Example #9
static void ocamlpool_chunk_alloc(void)
{
  ocamlpool_assert(ocamlpool_next_chunk_size > 1);
  void *block = caml_alloc_for_heap(ocamlpool_next_chunk_size * WORD_SIZE);
  abort_unless(block != NULL);

  size_t chunk_size = Chunk_size(block);
  ocamlpool_assert(IS_WORD_ALIGNED(chunk_size));

  ocamlpool_color = caml_allocation_color(block);
  ocamlpool_allocated_chunks_counter += 1;

  size_t words = (chunk_size / WORD_SIZE);

  ocamlpool_root = (value)((value*)block + 1);
  /* Make it look like a well-formed string */
  OCAMLPOOL_SET_HEADER(ocamlpool_root, words - 1, String_tag, ocamlpool_color);
  caml_add_to_heap(block);

  caml_allocated_words += words;
}
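
OCAMLPOOL_SET_HEADER has to build a block header the GC will accept. A sketch of the encoding it presumably performs, assuming the classic OCaml header layout (tag in bits 0-7, two GC color bits in bits 8-9, size in words in the remaining high bits); MAKE_HEADER below is a stand-in, not the runtime's Make_header:

#include <stdio.h>
#include <stdint.h>

typedef uintptr_t header_t;

#define MAKE_HEADER(wosize, tag, color) \
  (((header_t) (wosize) << 10) | ((header_t) (color) << 8) | (header_t) (tag))

int main (void)
{
  unsigned string_tag = 252;   /* String_tag in the runtime */
  header_t hd = MAKE_HEADER (1023, string_tag, 0 /* white */);
  printf ("header = %#lx (size %lu words, tag %lu)\n",
          (unsigned long) hd,
          (unsigned long) (hd >> 10),     /* recover the size field */
          (unsigned long) (hd & 0xff));   /* recover the tag field */
  return 0;
}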
Example #10
/* Check the heap structure (if compiled in debug mode) and
   gather statistics; return the stats if [returnstats] is true,
   otherwise return [Val_unit].
*/
static value heap_stats (int returnstats)
{
  CAMLparam0 ();
  intnat live_words = 0, live_blocks = 0,
         free_words = 0, free_blocks = 0, largest_free = 0,
         fragments = 0, heap_chunks = 0;
  char *chunk = caml_heap_start, *chunk_end;
  header_t *cur_hp;
#ifdef DEBUG
  header_t *prev_hp;
#endif
  header_t cur_hd;

#ifdef DEBUG
  caml_gc_message (-1, "### OCaml runtime: heap check ###\n", 0);
#endif

  while (chunk != NULL){
    ++ heap_chunks;
    chunk_end = chunk + Chunk_size (chunk);
#ifdef DEBUG
    prev_hp = NULL;
#endif
    cur_hp = (header_t *) chunk;
    while (cur_hp < (header_t *) chunk_end){
      cur_hd = Hd_hp (cur_hp);
      Assert (Next (cur_hp) <= (header_t *) chunk_end);
      switch (Color_hd (cur_hd)){
      case Caml_white:
        if (Wosize_hd (cur_hd) == 0){
          ++ fragments;
          Assert (prev_hp == NULL
                  || Color_hp (prev_hp) != Caml_blue
                  || cur_hp == (header_t *) caml_gc_sweep_hp);
        }else{
          if (caml_gc_phase == Phase_sweep
              && cur_hp >= (header_t *) caml_gc_sweep_hp){
            ++ free_blocks;
            free_words += Whsize_hd (cur_hd);
            if (Whsize_hd (cur_hd) > largest_free){
              largest_free = Whsize_hd (cur_hd);
            }
          }else{
            ++ live_blocks;
            live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
            check_block (cur_hp);
#endif
          }
        }
        break;
      case Caml_gray: case Caml_black:
        Assert (Wosize_hd (cur_hd) > 0);
        ++ live_blocks;
        live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
        check_block (cur_hp);
#endif
        break;
      case Caml_blue:
        Assert (Wosize_hd (cur_hd) > 0);
        ++ free_blocks;
        free_words += Whsize_hd (cur_hd);
        if (Whsize_hd (cur_hd) > largest_free){
          largest_free = Whsize_hd (cur_hd);
        }
        /* not true any more with big heap chunks
        Assert (prev_hp == NULL
                || (Color_hp (prev_hp) != Caml_blue && Wosize_hp (prev_hp) > 0)
                || cur_hp == caml_gc_sweep_hp);
        Assert (Next (cur_hp) == chunk_end
                || (Color_hp (Next (cur_hp)) != Caml_blue
                    && Wosize_hp (Next (cur_hp)) > 0)
                || (Whsize_hd (cur_hd) + Wosize_hp (Next (cur_hp)) > Max_wosize)
                || Next (cur_hp) == caml_gc_sweep_hp);
        */
        break;
      }
#ifdef DEBUG
      prev_hp = cur_hp;
#endif
      cur_hp = Next (cur_hp);
    }                             Assert (cur_hp == (header_t *) chunk_end);
    chunk = Chunk_next (chunk);
  }

  Assert (heap_chunks == caml_stat_heap_chunks);
  Assert (live_words + free_words + fragments == caml_stat_heap_wsz);

  if (returnstats){
    CAMLlocal1 (res);

    /* get a copy of these before allocating anything... */
    double minwords = caml_stat_minor_words
                      + (double) (caml_young_alloc_end - caml_young_ptr);
    double prowords = caml_stat_promoted_words;
    double majwords = caml_stat_major_words + (double) caml_allocated_words;
    intnat mincoll = caml_stat_minor_collections;
    intnat majcoll = caml_stat_major_collections;
    intnat heap_words = caml_stat_heap_wsz;
    intnat cpct = caml_stat_compactions;
    intnat top_heap_words = caml_stat_top_heap_wsz;

    res = caml_alloc_tuple (16);
    Store_field (res, 0, caml_copy_double (minwords));
    Store_field (res, 1, caml_copy_double (prowords));
    Store_field (res, 2, caml_copy_double (majwords));
    Store_field (res, 3, Val_long (mincoll));
    Store_field (res, 4, Val_long (majcoll));
    Store_field (res, 5, Val_long (heap_words));
    Store_field (res, 6, Val_long (heap_chunks));
    Store_field (res, 7, Val_long (live_words));
    Store_field (res, 8, Val_long (live_blocks));
    Store_field (res, 9, Val_long (free_words));
    Store_field (res, 10, Val_long (free_blocks));
    Store_field (res, 11, Val_long (largest_free));
    Store_field (res, 12, Val_long (fragments));
    Store_field (res, 13, Val_long (cpct));
    Store_field (res, 14, Val_long (top_heap_words));
    Store_field (res, 15, Val_long (caml_stack_usage()));
    CAMLreturn (res);
  }else{
    CAMLreturn (Val_unit);
  }
}
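
The scan loop relies on chunks being dense: Next (cur_hp) just advances by the block's total size, so a well-formed chunk makes the walk land exactly on chunk_end (hence the Assert after the loop). A toy model with bare word-count headers instead of real OCaml headers:

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  /* Three blocks of 2, 4 and 1 words (header included), packed densely. */
  uintptr_t chunk[7] = { 2, 0, 4, 0, 0, 0, 1 };
  uintptr_t *hp = chunk, *chunk_end = chunk + 7;
  int blocks = 0;

  while (hp < chunk_end){
    blocks++;
    hp += *hp;              /* Next (hp): skip header plus fields */
  }
  printf ("blocks = %d, landed on end = %d\n", blocks, hp == chunk_end);
  return 0;
}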
Example #11
void caml_compact_heap_r (CAML_R)
{
  uintnat target_words, target_size, live;

  do_compaction_r (ctx);
  /* Compaction may fail to shrink the heap to a reasonable size
     because it deals in complete chunks: if a very large chunk
     is at the beginning of the heap, everything gets moved to
     it and it is not freed.

     In that case, we allocate a new chunk of the desired heap
     size, chain it at the beginning of the heap (thus pretending
     its address is smaller), and launch a second compaction.
     This will move all data to this new chunk and free the
     very large chunk.

     See PR#5389
  */
  /* We compute:
     freewords = caml_fl_cur_size                  (exact)
     heapwords = Wsize_bsize (caml_heap_size)      (exact)
     live = heapwords - freewords
     wanted = caml_percent_free * (live / 100 + 1) (same as in do_compaction)
     target_words = live + wanted
     We add one page to make sure a small difference in counting sizes
     won't make [do_compaction] keep the second block (and break all sorts
     of invariants).

     We recompact if target_size < heap_size / 2
  */
  live = Wsize_bsize (caml_stat_heap_size) - caml_fl_cur_size;
  target_words = live + caml_percent_free * (live / 100 + 1)
                 + Wsize_bsize (Page_size);
  target_size = caml_round_heap_chunk_size_r (ctx, Bsize_wsize (target_words));
  if (target_size < caml_stat_heap_size / 2){
    char *chunk;

    caml_gc_message (0x10, "Recompacting heap (target=%luk)\n",
                     target_size / 1024);

    chunk = caml_alloc_for_heap (target_size);
    if (chunk == NULL) return;
    /* PR#5757: we need to make the new blocks blue, or they won't be
       recognized as free by the recompaction. */
    caml_make_free_blocks_r (ctx, (value *) chunk,
                           Wsize_bsize (Chunk_size (chunk)), 0, Caml_blue);
    if (caml_page_table_add_r (ctx, In_heap, chunk, chunk + Chunk_size (chunk)) != 0){
      caml_free_for_heap (chunk);
      return;
    }
    Chunk_next (chunk) = caml_heap_start;
    caml_heap_start = chunk;
    ++ caml_stat_heap_chunks;
    caml_stat_heap_size += Chunk_size (chunk);
    if (caml_stat_heap_size > caml_stat_top_heap_size){
      caml_stat_top_heap_size = caml_stat_heap_size;
    }
    do_compaction_r (ctx);
    Assert (caml_stat_heap_chunks == 1);
    Assert (Chunk_next (caml_heap_start) == NULL);
    Assert (caml_stat_heap_size == Chunk_size (chunk));
  }
}
Example #12
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
                                          Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{                                      Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }


  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }


  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;                             Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }                    Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{                                        Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:       Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header:
             the object was free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }


  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)), 1,
                                 Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
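
The second and third passes hinge on one trick: every field that points at a block is spliced into a chain threaded through that block's header word, so that once the block's new address is known, a single walk of the chain redirects all referrers and restores the header. A self-contained toy, using the low two bits to tell links (aligned addresses, Ecolor 0) from the real header (Ecolor 3); all values are made up:

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  uintptr_t obj[1] = { (0xABCu << 2) | 3 };  /* encoded header, low bits 3 */
  uintptr_t fields[3];                       /* three referrers, elsewhere */
  uintptr_t newadr = 0x1000;                 /* pretend destination address */
  int i;

  /* Invert: each field swaps its pointer to [obj] for the chain head. */
  for (i = 0; i < 3; i++){
    fields[i] = obj[0];
    obj[0] = (uintptr_t) &fields[i];
  }

  /* Relocate: unwind the chain, pointing every referrer at newadr. */
  uintptr_t q = obj[0];
  while ((q & 3) == 0){                      /* links are aligned pointers */
    uintptr_t next = * (uintptr_t *) q;
    * (uintptr_t *) q = newadr;
    q = next;
  }
  obj[0] = q;                                /* the real header resurfaces */

  printf ("header restored: %d; field[0] -> %#lx\n",
          obj[0] == ((0xABCu << 2) | 3), (unsigned long) fields[0]);
  return 0;
}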
Example #13
value gc_stat (value v) /* ML */
{
  value res;
  long live_words = 0, live_blocks = 0,
       free_words = 0, free_blocks = 0, largest_free = 0,
       fragments = 0, heap_chunks = 0;
  char *chunk = heap_start, *chunk_end;
  char *cur_hp, *prev_hp;
  header_t cur_hd;

  Assert (v == Atom (0));

  while (chunk != NULL){
    ++ heap_chunks;
    chunk_end = chunk + Chunk_size (chunk);
    prev_hp = NULL;
    cur_hp = chunk;
    while (cur_hp < chunk_end){
      cur_hd = Hd_hp (cur_hp);
      switch (Color_hd (cur_hd)){
      case White:
	if (Wosize_hd (cur_hd) == 0){
	  ++fragments;
	  Assert (prev_hp == NULL
		  || (Color_hp (prev_hp) != Blue
		      && Wosize_hp (prev_hp) > 0));
	  Assert (Next (cur_hp) == chunk_end
		  || (Color_hp (Next (cur_hp)) != Blue
		      && Wosize_hp (Next (cur_hp)) > 0));
	  break;
	}
	/* FALLTHROUGH */
      case Gray: case Black:
	Assert (Wosize_hd (cur_hd) > 0);
	++ live_blocks;
	live_words += Whsize_hd (cur_hd);
	break;
      case Blue:
	Assert (Wosize_hd (cur_hd) > 0);
	++ free_blocks;
	free_words += Whsize_hd (cur_hd);
	if (Whsize_hd (cur_hd) > largest_free){
	  largest_free = Whsize_hd (cur_hd);
	}
	Assert (prev_hp == NULL
		|| (Color_hp (prev_hp) != Blue
		    && Wosize_hp (prev_hp) > 0));
	Assert (Next (cur_hp) == chunk_end
		|| (Color_hp (Next (cur_hp)) != Blue
		    && Wosize_hp (Next (cur_hp)) > 0));
	break;
      }
      prev_hp = cur_hp;
      cur_hp = Next (cur_hp);
    }                                          Assert (cur_hp == chunk_end);
    chunk = Chunk_next (chunk);
  }
  
  Assert (live_words + free_words + fragments == Wsize_bsize (stat_heap_size));
  /* Order of elements changed for Moscow ML */
  res = alloc (13, 0);
  Field (res, 11) = Val_long (stat_minor_words
                             + Wsize_bsize (young_ptr - young_start));
  Field (res, 12) = Val_long (stat_promoted_words);
  Field (res,  9) = Val_long (stat_major_words + allocated_words);
  Field (res, 10) = Val_long (stat_minor_collections);
  Field (res,  8) = Val_long (stat_major_collections);
  Field (res,  4) = Val_long (Wsize_bsize (stat_heap_size));
  Field (res,  3) = Val_long (heap_chunks);
  Field (res,  7) = Val_long (live_words);
  Field (res,  6) = Val_long (live_blocks);
  Field (res,  2) = Val_long (free_words);
  Field (res,  1) = Val_long (free_blocks);
  Field (res,  5) = Val_long (largest_free);
  Field (res,  0) = Val_long (fragments);
  return res;
}
Example #14
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(mem)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.
*/
int caml_add_to_heap (char *m)
{
  asize_t i;
                                     Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (caml_stat_heap_size + Chunk_size (m)) / 1024);

  /* Extend the page table as needed. */
  if (Page (m) < caml_page_low){
    page_table_entry *block, *new_page_table;
    asize_t new_page_low = Page (m);
    asize_t new_size = caml_page_high - new_page_low;
    
    caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size);
    block = malloc (new_size * sizeof (page_table_entry));
    if (block == NULL){
      caml_gc_message (0x08, "No room for growing page table\n", 0);
      return -1;
    }
    new_page_table = block - new_page_low;
    for (i = new_page_low; i < caml_page_low; i++){
      new_page_table [i] = Not_in_heap;
    }
    for (i = caml_page_low; i < caml_page_high; i++){
      new_page_table [i] = caml_page_table [i];
    }
    free (caml_page_table + caml_page_low);
    caml_page_table = new_page_table;
    caml_page_low = new_page_low;
  }
  if (Page (m + Chunk_size (m)) > caml_page_high){
    page_table_entry *block, *new_page_table;
    asize_t new_page_high = Page (m + Chunk_size (m));
    asize_t new_size = new_page_high - caml_page_low;
    
    caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size);
    block = malloc (new_size * sizeof (page_table_entry));
    if (block == NULL){
      caml_gc_message (0x08, "No room for growing page table\n", 0);
      return -1;
    }
    new_page_table = block - caml_page_low;
    for (i = caml_page_low; i < caml_page_high; i++){
      new_page_table [i] = caml_page_table [i];
    }
    for (i = caml_page_high; i < new_page_high; i++){
      new_page_table [i] = Not_in_heap;
    }
    free (caml_page_table + caml_page_low);
    caml_page_table = new_page_table;
    caml_page_high = new_page_high;
  }

  /* Mark the pages as being in the heap. */
  for (i = Page (m); i < Page (m + Chunk_size (m)); i++){
    caml_page_table [i] = In_heap;
  }

  /* Chain this heap chunk. */
  {
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  /* Update the heap bounds as needed. */
  /* already done:   if (m < caml_heap_start) heap_start = m; */
  if (m + Chunk_size (m) > caml_heap_end) caml_heap_end = m + Chunk_size (m);

  caml_stat_heap_size += Chunk_size (m);
  if (caml_stat_heap_size > caml_stat_top_heap_size){
    caml_stat_top_heap_size = caml_stat_heap_size;
  }
  return 0;
}
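
The page-table growth above relies on biased indexing: the freshly malloc'd array is stored offset by the lowest page number, so caml_page_table[i] can be indexed with absolute page numbers, and freeing must add the bias back. A toy sketch with a made-up page range (the out-of-bounds pointer arithmetic is formally undefined in ISO C, exactly as in the original):

#include <stdio.h>
#include <stdlib.h>

typedef int page_table_entry;
enum { Not_in_heap = 0, In_heap = 1 };

int main (void)
{
  size_t page_low = 100, page_high = 108, i;   /* hypothetical page range */
  page_table_entry *block = malloc ((page_high - page_low) * sizeof *block);
  if (block == NULL) return 1;

  page_table_entry *page_table = block - page_low;   /* biased base */
  for (i = page_low; i < page_high; i++) page_table[i] = Not_in_heap;
  page_table[103] = In_heap;                         /* absolute page number */

  printf ("page 103: %d\n", page_table[103]);
  free (page_table + page_low);                      /* undo the bias */
  return 0;
}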