Example #1
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
                                          Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{                                      Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }


  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }


  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;                             Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }                    Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{                                        Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:       Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header:
             the object was free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }


  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)), 1,
                                 Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
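
The compaction above relies on pointer threading ("inversion"): during the second pass every pointer to a block is linked into a list rooted at the block's header, with the Ecolor bits telling list links apart from the encoded header, so that once the block's destination is known the third pass patches all referrers with a single walk of that list. A minimal stand-alone sketch of that idea, with invented names and a zero header used as the end-of-list sentinel instead of the runtime's color bits:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t word;

/* A toy "block": slot [0] plays the role of the header. */
static word heap_block[2];

/* Two toy "fields" elsewhere that point at heap_block. */
static word ref_a, ref_b;

/* Invert one pointer: the referring slot takes over the current header
   value, and the header now points back at the referring slot. */
static void invert(word *slot) {
  word *target = (word *) *slot;       /* block the slot points to */
  *slot = target[0];
  target[0] = (word) slot;             /* header now heads the list */
}

int main(void) {
  heap_block[0] = 0;                   /* 0 stands in for the encoded header */
  ref_a = (word) heap_block;
  ref_b = (word) heap_block;

  invert(&ref_a);
  invert(&ref_b);                      /* list: header -> ref_b -> ref_a -> 0 */

  /* Pretend the block moves to new_addr: walk the inverted list,
     patch every referrer, then restore the header. */
  word new_addr = 0x1000;
  word q = heap_block[0];
  while (q != 0) {
    word next = *(word *) q;
    *(word *) q = new_addr;
    q = next;
  }
  heap_block[0] = 0;

  printf("ref_a = %#lx, ref_b = %#lx\n",
         (unsigned long) ref_a, (unsigned long) ref_b);
  return 0;
}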
Example #2
static void extern_rec(value v)
{
 tailcall:
  if (Is_long(v)) {
    intnat n = Long_val(v);
    return;
  }
  else {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (Tag_val (f) == Closure_tag || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        goto tailcall;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      return;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      return;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      extern_record_location(v);
      break;
    }
    case Double_tag: {
//       if (sizeof(double) != 8)
//         extern_invalid_argument("output_value: non-standard floats");
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
//       if (sizeof(double) != 8)
//         extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
//       extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
//     case Infix_tag:
//       writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
//       extern_rec(v - Infix_offset_hd(hd));
//       break;
    case Custom_tag: {
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      mlsize_t i;
      field0 = Field(v, 0);
      extern_record_location(v);
      if (sz == 1) {
        v = field0;
      } else {
        extern_rec(field0);
        for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i));
        v = Field(v, i);
      }
      goto tailcall;
    }
    }
  }
}
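
This stripped-down extern_rec keeps the traversal shape of the full version: recurse on fields 1 .. sz-2 and loop on the last field via goto tailcall, so a long chain reached through last fields uses constant C stack. A toy illustration of that pattern on an invented node type (not the runtime's value representation):

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *left, *right; };

/* Recurse on the non-last field, loop on the last one. */
static void visit(struct node *v) {
 tailcall:
  if (v == NULL) return;
  printf("node %d\n", v->id);
  visit(v->left);          /* genuine recursive call                    */
  v = v->right;            /* last field: iterate instead of recursing  */
  goto tailcall;
}

int main(void) {
  struct node c = {3, NULL, NULL};
  struct node b = {2, NULL, &c};
  struct node a = {1, NULL, &b};
  visit(&a);               /* visits 1, 2, 3 with constant stack depth */
  return 0;
}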
Example #3
static void extern_rec(value v)
{
 tailcall:
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    return;
  }
  if (Is_young(v) || Is_in_heap(v) || Is_atom(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f) && (Is_young (f) || Is_in_heap (f))
          && (Tag_val (f) == Forward_tag || Tag_val (f) == Lazy_tag
              || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        goto tailcall;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      return;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      return;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32,
                        uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);  /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      mlsize_t i;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location(v);
      if (sz == 1) {
        v = field0;
      } else {
        extern_rec(field0);
        for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i));
        v = Field(v, i);
      }
      goto tailcall;
    }
    }
  }
}
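
The integer case at the top of extern_rec picks the narrowest of several encodings (a small value folded into the opcode, otherwise CODE_INT8/16/32/64) from the value's range. A small stand-alone helper with an assumed name that mirrors only the range selection and reports the payload width it would choose:

#include <stdint.h>
#include <stdio.h>

/* Return how many payload bytes the writer would emit after the opcode. */
static int int_payload_bytes(int64_t n) {
  if (n >= 0 && n < 0x40)               return 0; /* fits in PREFIX_SMALL_INT */
  if (n >= -(1 << 7) && n < (1 << 7))   return 1; /* CODE_INT8  */
  if (n >= -(1 << 15) && n < (1 << 15)) return 2; /* CODE_INT16 */
  if (n >= -((int64_t)1 << 31) && n < ((int64_t)1 << 31))
                                        return 4; /* CODE_INT32 */
  return 8;                                       /* CODE_INT64 */
}

int main(void) {
  /* Prints: 0 1 4 8 */
  printf("%d %d %d %d\n",
         int_payload_bytes(5), int_payload_bytes(-100),
         int_payload_bytes(70000), int_payload_bytes((int64_t)1 << 40));
  return 0;
}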
Example #4
value gc_stat (value v) /* ML */
{
  value res;
  long live_words = 0, live_blocks = 0,
       free_words = 0, free_blocks = 0, largest_free = 0,
       fragments = 0, heap_chunks = 0;
  char *chunk = heap_start, *chunk_end;
  char *cur_hp, *prev_hp;
  header_t cur_hd;

  Assert (v == Atom (0));

  while (chunk != NULL){
    ++ heap_chunks;
    chunk_end = chunk + Chunk_size (chunk);
    prev_hp = NULL;
    cur_hp = chunk;
    while (cur_hp < chunk_end){
      cur_hd = Hd_hp (cur_hp);
      switch (Color_hd (cur_hd)){
      case White:
	if (Wosize_hd (cur_hd) == 0){
	  ++fragments;
	  Assert (prev_hp == NULL
		  || (Color_hp (prev_hp) != Blue
		      && Wosize_hp (prev_hp) > 0));
	  Assert (Next (cur_hp) == chunk_end
		  || (Color_hp (Next (cur_hp)) != Blue
		      && Wosize_hp (Next (cur_hp)) > 0));
	  break;
	}
	/* FALLTHROUGH */
      case Gray: case Black:
	Assert (Wosize_hd (cur_hd) > 0);
	++ live_blocks;
	live_words += Whsize_hd (cur_hd);
	break;
      case Blue:
	Assert (Wosize_hd (cur_hd) > 0);
	++ free_blocks;
	free_words += Whsize_hd (cur_hd);
	if (Whsize_hd (cur_hd) > largest_free){
	  largest_free = Whsize_hd (cur_hd);
	}
	Assert (prev_hp == NULL
		|| (Color_hp (prev_hp) != Blue
		    && Wosize_hp (prev_hp) > 0));
	Assert (Next (cur_hp) == chunk_end
		|| (Color_hp (Next (cur_hp)) != Blue
		    && Wosize_hp (Next (cur_hp)) > 0));
	break;
      }
      prev_hp = cur_hp;
      cur_hp = Next (cur_hp);
    }                                          Assert (cur_hp == chunk_end);
    chunk = Chunk_next (chunk);
  }
  
  Assert (live_words + free_words + fragments == Wsize_bsize (stat_heap_size));
  /* Order of elements changed for Moscow ML */
  res = alloc (13, 0);
  Field (res, 11) = Val_long (stat_minor_words
                             + Wsize_bsize (young_ptr - young_start));
  Field (res, 12) = Val_long (stat_promoted_words);
  Field (res,  9) = Val_long (stat_major_words + allocated_words);
  Field (res, 10) = Val_long (stat_minor_collections);
  Field (res,  8) = Val_long (stat_major_collections);
  Field (res,  4) = Val_long (Wsize_bsize (stat_heap_size));
  Field (res,  3) = Val_long (heap_chunks);
  Field (res,  7) = Val_long (live_words);
  Field (res,  6) = Val_long (live_blocks);
  Field (res,  2) = Val_long (free_words);
  Field (res,  1) = Val_long (free_blocks);
  Field (res,  5) = Val_long (largest_free);
  Field (res,  0) = Val_long (fragments);
  return res;
}
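
Both this gc_stat and heap_stats in Example #6 below walk the heap the same way: chunks are chained through Chunk_next, and inside a chunk blocks lie back to back, so Next (hp) simply advances past the current block's header and payload. A toy, self-contained sketch of that layout, with invented macros standing in for the real header accessors:

#include <stdio.h>
#include <stddef.h>

typedef size_t header_t;                 /* toy header: payload size in words */

#define Whsize_hd(hd)  ((hd) + 1)        /* payload words + header word       */
#define Next(p)        ((p) + Whsize_hd(*(p)))

int main(void) {
  /* One "chunk" holding three blocks of 2, 1 and 3 payload words. */
  header_t chunk[] = {2, 0, 0,  1, 0,  3, 0, 0, 0};
  header_t *end = chunk + sizeof chunk / sizeof chunk[0];
  size_t blocks = 0, words = 0;

  for (header_t *p = chunk; p < end; p = Next(p)) {
    ++blocks;
    words += Whsize_hd(*p);
  }
  printf("%zu blocks, %zu words\n", blocks, words);   /* 3 blocks, 9 words */
  return 0;
}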
Example #5
static void extern_rec(value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32,
                        uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);  /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location(v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack(sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code((char *) v)) != NULL) {
    if (!extern_closures)
      extern_invalid_argument("output_value: functional value");
    writecode32(CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock((char *) cf->digest, 16);
  } else {
    extern_invalid_argument("output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
        /* We are done.   Cleanup the stack and leave the function */
        extern_free_stack();
        return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
}
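
Compared with Examples #2 and #3, this version removes the recursion on the middle fields: pending fields are pushed on an explicit stack of (pointer, count) items and popped at next_item, while serialization always continues with field 0. A compact sketch of the same scheme on a toy node type (invented names, and no stack growth as done by extern_resize_stack):

#include <stdio.h>

struct node { int tag; int nfields; struct node **fields; };
struct item { struct node **v; int count; };

static void walk(struct node *v) {
  struct item stack[64];               /* no overflow handling in this sketch */
  struct item *sp = stack;             /* sp == stack means "stack empty"     */

  while (1) {
    printf("visiting tag %d\n", v->tag);
    if (v->nfields > 0) {
      if (v->nfields > 1) {            /* remember fields 1 .. nfields-1      */
        ++sp;
        sp->v = &v->fields[1];
        sp->count = v->nfields - 1;
      }
      v = v->fields[0];                /* keep going with field 0             */
      continue;
    }
    /* Leaf: pop the next pending field, if any. */
    if (sp == stack) return;
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
}

int main(void) {
  struct node leaf1 = {1, 0, NULL}, leaf2 = {2, 0, NULL}, leaf3 = {3, 0, NULL};
  struct node *kids[3] = { &leaf1, &leaf2, &leaf3 };
  struct node root = {0, 3, kids};
  walk(&root);                         /* visits tags 0, 1, 2, 3 */
  return 0;
}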
Example #6
/* Check the heap structure (if compiled in debug mode) and
   gather statistics; return the stats if [returnstats] is true,
   otherwise return [Val_unit].
*/
static value heap_stats (int returnstats)
{
  CAMLparam0 ();
  intnat live_words = 0, live_blocks = 0,
         free_words = 0, free_blocks = 0, largest_free = 0,
         fragments = 0, heap_chunks = 0;
  char *chunk = caml_heap_start, *chunk_end;
  char *cur_hp, *prev_hp;
  header_t cur_hd;

#ifdef DEBUG
  caml_gc_message (-1, "### O'Caml runtime: heap check ###\n", 0);
#endif

  while (chunk != NULL){
    ++ heap_chunks;
    chunk_end = chunk + Chunk_size (chunk);
    prev_hp = NULL;
    cur_hp = chunk;
    while (cur_hp < chunk_end){
      cur_hd = Hd_hp (cur_hp);
                                           Assert (Next (cur_hp) <= chunk_end);
      switch (Color_hd (cur_hd)){
      case Caml_white:
        if (Wosize_hd (cur_hd) == 0){
          ++ fragments;
          Assert (prev_hp == NULL
                  || Color_hp (prev_hp) != Caml_blue
                  || cur_hp == caml_gc_sweep_hp);
        }else{
          if (caml_gc_phase == Phase_sweep && cur_hp >= caml_gc_sweep_hp){
            ++ free_blocks;
            free_words += Whsize_hd (cur_hd);
            if (Whsize_hd (cur_hd) > largest_free){
              largest_free = Whsize_hd (cur_hd);
            }
          }else{
            ++ live_blocks;
            live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
            check_block (cur_hp);
#endif
          }
        }
        break;
      case Caml_gray: case Caml_black:
        Assert (Wosize_hd (cur_hd) > 0);
        ++ live_blocks;
        live_words += Whsize_hd (cur_hd);
#ifdef DEBUG
        check_block (cur_hp);
#endif
        break;
      case Caml_blue:
        Assert (Wosize_hd (cur_hd) > 0);
        ++ free_blocks;
        free_words += Whsize_hd (cur_hd);
        if (Whsize_hd (cur_hd) > largest_free){
          largest_free = Whsize_hd (cur_hd);
        }
        /* not true any more with big heap chunks
        Assert (prev_hp == NULL
                || (Color_hp (prev_hp) != Caml_blue && Wosize_hp (prev_hp) > 0)
                || cur_hp == caml_gc_sweep_hp);
        Assert (Next (cur_hp) == chunk_end
                || (Color_hp (Next (cur_hp)) != Caml_blue
                    && Wosize_hp (Next (cur_hp)) > 0)
                || (Whsize_hd (cur_hd) + Wosize_hp (Next (cur_hp)) > Max_wosize)
                || Next (cur_hp) == caml_gc_sweep_hp);
        */
        break;
      }
      prev_hp = cur_hp;
      cur_hp = Next (cur_hp);
    }                                          Assert (cur_hp == chunk_end);
    chunk = Chunk_next (chunk);
  }

  Assert (heap_chunks == caml_stat_heap_chunks);
  Assert (live_words + free_words + fragments
          == Wsize_bsize (caml_stat_heap_size));

  if (returnstats){
    CAMLlocal1 (res);

    /* get a copy of these before allocating anything... */
    double minwords = caml_stat_minor_words
                      + (double) Wsize_bsize (caml_young_end - caml_young_ptr);
    double prowords = caml_stat_promoted_words;
    double majwords = caml_stat_major_words + (double) caml_allocated_words;
    intnat mincoll = caml_stat_minor_collections;
    intnat majcoll = caml_stat_major_collections;
    intnat heap_words = Wsize_bsize (caml_stat_heap_size);
    intnat cpct = caml_stat_compactions;
    intnat top_heap_words = Wsize_bsize (caml_stat_top_heap_size);

    res = caml_alloc_tuple (15);
    Store_field (res, 0, caml_copy_double (minwords));
    Store_field (res, 1, caml_copy_double (prowords));
    Store_field (res, 2, caml_copy_double (majwords));
    Store_field (res, 3, Val_long (mincoll));
    Store_field (res, 4, Val_long (majcoll));
    Store_field (res, 5, Val_long (heap_words));
    Store_field (res, 6, Val_long (heap_chunks));
    Store_field (res, 7, Val_long (live_words));
    Store_field (res, 8, Val_long (live_blocks));
    Store_field (res, 9, Val_long (free_words));
    Store_field (res, 10, Val_long (free_blocks));
    Store_field (res, 11, Val_long (largest_free));
    Store_field (res, 12, Val_long (fragments));
    Store_field (res, 13, Val_long (cpct));
    Store_field (res, 14, Val_long (top_heap_words));
    CAMLreturn (res);
  }else{
    CAMLreturn (Val_unit);
  }
}
Example #7
static void extern_rec_r(CAML_R, value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
    //?????DUMP("QQQ 0x%lx, or %li ", v, v);
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8_r(ctx, CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16_r(ctx, CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64_r(ctx, CODE_INT64, n);
#endif
    } else
      writecode32_r(ctx, CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);
    //DUMP("dumping %p, tag %i, size %i", (void*)v, (int)tag, (int)sz); // !!!!!!!!!!!!!!!
    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32_r(ctx, CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8_r(ctx, CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16_r(ctx, CODE_SHARED16, d);
      } else {
        writecode32_r(ctx, CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8_r(ctx, CODE_STRING8, len);
      } else {
        writecode32_r(ctx, CODE_STRING32, len);
      }
      writeblock_r(ctx, String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location_r(ctx, v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location_r(ctx,v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8_r(ctx, CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32_r(ctx, CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location_r(ctx, v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument_r(ctx, "output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32_r(ctx,CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec_r(ctx, v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32,
                        uintnat * wsize_64);
      //printf("[object at %p, which is a %s custom: BEGIN\n", (void*)v, Custom_ops_val(v)->identifier);
      if(extern_cross_context){
        //printf("About the object at %p, which is a %s custom: USING a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier);
        serialize = Custom_ops_val(v)->cross_context_serialize;
      }
      else{
        //printf("About the object at %p, which is a %s custom: NOT using a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier);
        serialize = Custom_ops_val(v)->serialize;
      }
      //printf("Still alive 100\n");
      if (serialize == NULL){
        //////
        //struct custom_operations *o = Custom_ops_val(v);
        //printf("About the object at %p, which is a %s custom\n", (void*)v, Custom_ops_val(v)->identifier); volatile int a = 1; a /= 0;
        ///////////
        extern_invalid_argument_r(ctx, "output_value: abstract value (Custom)");
      }
      //printf("Still alive 200\n");
      Write(CODE_CUSTOM);
      //printf("Still alive 300\n");
      writeblock_r(ctx, ident, strlen(ident) + 1);
      //printf("Still alive 400\n");
      serialize(v, &sz_32, &sz_64);
      //printf("Still alive 500\n");
      size_32 += 2 + ((sz_32 + 3) >> 2);  /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      //printf("Still alive 600\n");
      extern_record_location_r(ctx,v); // This temporarily breaks the object, by replacing it with a forwarding pointer
      //printf("object at %p, which is a custom: END\n", (void*)v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64_r(ctx, CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32_r(ctx, CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location_r(ctx, v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack_r(ctx, sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code_r(ctx, (char *) v)) != NULL) {
    if (!extern_closures){
      extern_invalid_argument_r(ctx, "output_value: functional value"); // FIXME: this is the correct version. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
      //DUMP("output_value: functional value"); {volatile int a = 1; a /= 0;}
      }
    //fprintf(stderr, "ZZZZ dumping a code pointer: BEGIN\n");
    //DUMP("dumping a code pointer 0x%lx, or %li; code start is at %p", v, v, cf->code_start);
    writecode32_r(ctx, CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock_r(ctx, (char *) cf->digest, 16);
    //dump_digest(cf->digest);
    //fprintf(stderr, "ZZZZ dumping a code pointer: END\n");
  } else {
    if(extern_cross_context){
      fprintf(stderr, "ZZZZ working on the external pointer: %p, which is to say %li [cf is %p]\n", (void*)v, (long)v, cf);
      //fprintf(stderr, "ZZZZ I'm doing a horrible, horrible thing: serializing the pointer as a tagged 0.\n");
      DUMP("about to crash in the strange case I'm debugging");
      /* DUMP("the object is 0x%lx, or %li ", v, v); */
      /* DUMP("probably crashing now"); */
      /* DUMP("tag is %i", (int)Tag_val(v)); */
      /* DUMP("size is %i", (int)Wosize_val(v)); */
      //volatile int a = 1; a /= 0;
      //extern_rec_r(ctx, Val_int(0));
      /* fprintf(stderr, "ZZZZ [This is probably wrong: I'm marshalling an out-of-heap pointer as an int64]\n"); */
      /* writecode64_r(ctx, CODE_INT64, (v << 1) | 1); */
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap) [FIXME: implement]");
    }
    else
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
        /* We are done.   Cleanup the stack and leave the function */
        extern_free_stack_r(ctx);
        return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
}