Example No. 1
/* [allocate_block] is called by [caml_fl_allocate].  Given a suitable free
   block and the desired size, it allocates a new block from the free
   block.  There are three cases:
   0. The free block has the desired size.  Detach the block from the
      free-list and return it.
   1. The free block is 1 word longer than the desired size.  Detach
      the block from the free list.  The remaining word cannot be linked:
      turn it into an empty block (header only), and return the rest.
   2. The free block is big enough.  Split it in two and return the right
      block.
   In all cases, the allocated block is right-justified in the free block:
   it is located in the high-address words of the free block.  This way,
   the linking of the free-list does not change in case 2.
*/
static char *allocate_block (mlsize_t wh_sz, int flpi, char *prev, char *cur)
{
  header_t h = Hd_bp (cur);
                                             Assert (Whsize_hd (h) >= wh_sz);
  if (Wosize_hd (h) < wh_sz + 1){                        /* Cases 0 and 1. */
    caml_fl_cur_size -= Whsize_hd (h);
    Next (prev) = Next (cur);
                    Assert (Is_in_heap (Next (prev)) || Next (prev) == NULL);
    if (caml_fl_merge == cur) caml_fl_merge = prev;
#ifdef DEBUG
    fl_last = NULL;
#endif
      /* In case 1, the following creates the empty block correctly.
         In case 0, it gives an invalid header to the block.  The function
         calling [caml_fl_allocate] will overwrite it. */
    Hd_op (cur) = Make_header (0, 0, Caml_white);
    if (policy == Policy_first_fit){
      if (flpi + 1 < flp_size && flp[flpi + 1] == cur){
        flp[flpi + 1] = prev;
      }else if (flpi == flp_size - 1){
        beyond = (prev == Fl_head) ? NULL : prev;
        -- flp_size;
      }
    }
  }else{                                                        /* Case 2. */
    caml_fl_cur_size -= wh_sz;
    Hd_op (cur) = Make_header (Wosize_hd (h) - wh_sz, 0, Caml_blue);
  }
  if (policy == Policy_next_fit) fl_prev = prev;
  return cur + Bosize_hd (h) - Bsize_wsize (wh_sz);
}
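
A note on the return expression: [cur + Bosize_hd (h) - Bsize_wsize (wh_sz)] lands on the high-address end of the free block, which is why the comment can promise that the free-list links stay valid in case 2. Below is a minimal standalone sketch of that arithmetic; WORD and the macro definitions are illustrative stand-ins, not the runtime's own.

#include <assert.h>
#include <stdio.h>

#define WORD 8                         /* assumed word size in bytes */
#define Bsize_wsize(w) ((w) * WORD)    /* words -> bytes */

int main(void)
{
  /* A 10-word free block (header included); we request wh_sz = 4
     words (header included), i.e. case 2 of allocate_block. */
  long block_whsize = 10, wh_sz = 4;
  long bosize = Bsize_wsize(block_whsize - 1);  /* Bosize_hd: body bytes */

  /* The allocated block occupies the last wh_sz words, so the free
     block keeps its header address and the free-list links pointing
     at it remain valid. */
  long alloc_offset = bosize - Bsize_wsize(wh_sz);
  assert(alloc_offset == Bsize_wsize(block_whsize - 1 - wh_sz));
  printf("allocated block starts %ld bytes past the body start\n",
         alloc_offset);
  return 0;
}
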
Example No. 2
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  uintnat colour;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    sizeclass sz = sizeclass_wsize[whsize];
    Assert(wsize_sizeclass[sz] >= whsize);
    p = pool_allocate(local, sz);
    if (!p) return 0;
    struct heap_stats* s = &local->stats;
    s->pool_live_blocks++;
    s->pool_live_words += whsize;
    s->pool_frag_words += wsize_sizeclass[sz] - whsize;
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
    if (!p) return 0;
  }
  colour = pinned ? NOT_MARKABLE : global.MARKED;
  Hd_hp (p) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
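
All of these examples funnel through Make_header, which packs size, tag, and colour into the one-word header. For orientation, here is a hedged sketch of a typical 64-bit packing (tag in the low 8 bits, colour in the next 2, size above); the exact shifts and colour encoding differ between runtime versions, so treat the layout below as an assumption.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t header_t;

/* Assumed layout: [ wosize : 54 | colour : 2 | tag : 8 ] */
#define Make_header_sketch(wosize, tag, colour) \
  (((header_t)(wosize) << 10) | ((header_t)(colour) << 8) | (header_t)(tag))
#define Wosize_hd_sketch(hd)  ((hd) >> 10)
#define Colour_hd_sketch(hd)  (((hd) >> 8) & 3)
#define Tag_hd_sketch(hd)     ((hd) & 0xFF)

int main(void)
{
  header_t h = Make_header_sketch(3, 0, 2);  /* 3 words, tag 0, colour 2 */
  printf("wosize=%lu colour=%lu tag=%lu\n",
         (unsigned long)Wosize_hd_sketch(h),
         (unsigned long)Colour_hd_sketch(h),
         (unsigned long)Tag_hd_sketch(h));
  return 0;
}
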
Example No. 3
static void init_atoms(void)
{
  extern struct segment caml_data_segments[], caml_code_segments[];
  int i;

  for (i = 0; i < 256; i++) {
    caml_atom_table[i] = Make_header(0, i, Caml_white);
  }
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0)
    caml_fatal_error("Fatal error: not enough memory for the initial page table");

  for (i = 0; caml_data_segments[i].begin != 0; i++) {
    if (caml_page_table_add(In_static_data,
                            caml_data_segments[i].begin,
                            caml_data_segments[i].end) != 0)
      caml_fatal_error("Fatal error: not enough memory for the initial page table");
  }

  caml_code_area_start = caml_code_segments[0].begin;
  caml_code_area_end = caml_code_segments[0].end;
  for (i = 1; caml_code_segments[i].begin != 0; i++) {
    if (caml_code_segments[i].begin < caml_code_area_start)
      caml_code_area_start = caml_code_segments[i].begin;
    if (caml_code_segments[i].end > caml_code_area_end)
      caml_code_area_end = caml_code_segments[i].end;
  }
}
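
The 256 headers written here are the "atoms": shared zero-sized blocks, one per tag, used for values such as the empty array. A self-contained sketch of the idea follows; header_t, Val_hp, and Atom are simplified stand-ins, and the table entries abbreviate Make_header(0, t, Caml_white).

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t header_t;
typedef uintptr_t value;

static header_t atom_table[256];

/* An atom value points just past its header, like any block. */
#define Val_hp(hp) ((value)((header_t *)(hp) + 1))
#define Atom(tag)  Val_hp(&atom_table[(tag)])

int main(void)
{
  int t;
  for (t = 0; t < 256; t++)
    atom_table[t] = (header_t)t;  /* simplified Make_header(0, t, white) */

  /* Atom(0) is a valid pointer to a 0-word block with tag 0. */
  printf("Atom(0) tag = %d\n", (int)(((header_t *)Atom(0))[-1] & 0xFF));
  return 0;
}
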
Example No. 4
value caml_aligned_array_create(size_t alignment, value len)
{
  CAMLparam1 (len);

  void* bp;
  mlsize_t bosize;
  int result;

  bosize = (Int_val(len) + 1) * alignment;
  result = posix_memalign(&bp, alignment, bosize);
  if (result != 0)
  {
    if (result == EINVAL)
      caml_failwith(
        "The alignment was not a power of two, or was not a multiple of sizeof(void *)");
    else if (result == ENOMEM)
      caml_raise_out_of_memory();
    else
      caml_failwith("Unrecognized error");
  }

  /* Leave space for the header just before the returned pointer;
     pointer arithmetic on void* is a GCC extension, so go through char*. */
  bp = (void *) ((char *) bp + alignment);
  
  Hd_bp (bp) =
    Make_header (Wosize_bhsize(Bhsize_bosize(bosize - alignment)),
                 Double_array_tag, Caml_white);

  CAMLreturn (Val_bp(bp));
}
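
A standalone sketch of the layout this function builds: posix_memalign hands back aligned storage, the first alignment-sized slot is sacrificed, and the header lands in the word just below the returned data pointer. The sizes and the header value below are illustrative assumptions.

#define _POSIX_C_SOURCE 200112L   /* for posix_memalign */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *mem = NULL;
  size_t alignment = 16, len = 4;

  /* (len + 1) slots: one extra alignment-sized slot holds the header. */
  if (posix_memalign(&mem, alignment, (len + 1) * alignment) != 0)
    return 1;

  char *bp = (char *)mem + alignment;      /* returned data pointer */
  uintptr_t *hd = (uintptr_t *)bp - 1;     /* header just before the data */
  *hd = 42;                                /* stand-in for Make_header */

  printf("data pointer aligned: %d\n",
         (int)(((uintptr_t)bp % alignment) == 0));
  free(mem);
  return 0;
}
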
Example No. 5
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  header_t *hp;
  value *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks ((value) new_block);
    hp = caml_fl_allocate (wosize);
  }

  Assert (Is_in_heap (Val_hp (hp)));

  /* Inline expansion of caml_allocation_color. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > caml_minor_heap_wsz){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    uintnat i;
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
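
The colour branch above is, per its comment, an inline expansion of [caml_allocation_color]. A restatement of that rule as a tiny standalone predicate (the enum and names are local stand-ins, not the runtime's):

#include <stdint.h>

enum gc_phase { Phase_idle_s, Phase_mark_s, Phase_sweep_s };

/* Allocate black while marking, or while sweeping at an address the
   sweeper has already passed; otherwise the block may safely be
   white.  This mirrors the if/else chain above. */
static int allocate_black(enum gc_phase phase, uintptr_t hp,
                          uintptr_t sweep_hp)
{
  return phase == Phase_mark_s
      || (phase == Phase_sweep_s && hp >= sweep_hp);
}

int main(void)
{
  return !allocate_black(Phase_mark_s, 0, 0);  /* marking -> black */
}
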
Example No. 6
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed to by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory.
*/
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
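
The chain this function builds (field 0 links the blocks; field 1 of the first block names the last) can be checked with a small standalone model. Val_hp and Field below are simplified stand-ins for the runtime macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t value;
typedef uintptr_t header_t;

/* Minimal stand-ins, illustrative only. */
#define Val_hp(hp)   ((value)((header_t *)(hp) + 1))
#define Field(v, i)  (((value *)(v))[i])

int main(void)
{
  /* Build a 3-block chain the way expand_heap does: field 0 of each
     block points at the next, the last link is NULL, and field 1 of
     the first block points at the last block. */
  value blocks[3][4] = {{0}};           /* header + 3 fields each */
  value b0 = Val_hp(blocks[0]), b1 = Val_hp(blocks[1]), b2 = Val_hp(blocks[2]);
  Field(b0, 0) = b1; Field(b1, 0) = b2; Field(b2, 0) = (value)NULL;
  Field(b0, 1) = b2;

  /* Consume it as caml_fl_add_blocks could: follow field 0 to the end. */
  int n = 0;
  for (value v = b0; v != (value)NULL; v = Field(v, 0)) n++;
  printf("chain length: %d (first's field 1 is last: %d)\n",
         n, Field(b0, 1) == b2);
  return 0;
}
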
Example No. 7
void caml_array_bound_error(void)
{
  if (! array_bound_error_bucket_inited) {
    mlsize_t wosize = (BOUND_MSG_LEN + sizeof(value)) / sizeof(value);
    mlsize_t offset_index = Bsize_wsize(wosize) - 1;
    array_bound_error_msg.hdr = Make_header(wosize, String_tag, Caml_white);
    array_bound_error_msg.data[offset_index] = offset_index - BOUND_MSG_LEN;
    array_bound_error_bucket.hdr = Make_header(2, 0, Caml_white);
    array_bound_error_bucket.exn = (value) caml_exn_Invalid_argument;
    array_bound_error_bucket.arg = (value) array_bound_error_msg.data;
    caml_page_table_add(In_static_data,
                        &array_bound_error_msg,
                        &array_bound_error_msg + 1);
    array_bound_error_bucket_inited = 1;
  }
  caml_raise((value) &array_bound_error_bucket.exn);
}
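
The two assignments that build the message rely on OCaml's string padding convention: the final byte of the block stores the distance back to the logical end of the string, so the length can be recovered from the block alone. A self-contained sketch, with an illustrative message and sizes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  /* An OCaml string of length len occupies wosize words, where
     wosize = (len + sizeof(value)) / sizeof(value); at least one
     padding byte always remains after the string proper. */
  const char *msg = "index out of range";     /* illustrative text */
  size_t len = strlen(msg);
  size_t word = sizeof(uintptr_t);
  size_t wosize = (len + word) / word;
  size_t last = wosize * word - 1;            /* offset of the last byte */

  unsigned char data[64] = {0};
  memcpy(data, msg, len);
  data[last] = (unsigned char)(last - len);   /* padding-length byte */

  /* Recover the length from the block alone, as the runtime does. */
  assert(wosize * word - 1 - data[last] == len);
  printf("len recovered: %zu\n", wosize * word - 1 - data[last]);
  return 0;
}
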
Example No. 8
static void init_atoms(void)
{
  int i;
  for(i = 0; i < 256; i++) caml_atom_table[i] = Make_header(0, i, Caml_white);
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0) {
    caml_fatal_error("Fatal error: not enough memory for the initial page table");
  }
}
Example No. 9
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed to by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = request + request / 100 * caml_percent_free;
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (over_request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_bhsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    hp += Bhsize_wosize (Max_wosize);
    remain -= Bhsize_wosize (Max_wosize);
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_bhsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    Field (Op_hp (hp), 0) = (value) NULL;
  }else{
    Field (Op_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
Example No. 10
CAMLexport void * caml_stat_alloc (asize_t sz)
{
  void* result = malloc (sizeof(value) + sz);
  if (result == NULL)
    caml_raise_out_of_memory();
  Hd_hp(result) = Make_header(STAT_ALLOC_MAGIC, Abstract_tag, NOT_MARKABLE);
#ifdef DEBUG
  memset ((void*)Val_hp(result), Debug_uninit_stat, sz);
#endif
  return (void*)Val_hp(result);
}
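
The caller receives Val_hp(result), one header word past the raw allocation, so a matching free must step back before calling free. A hedged round-trip sketch with local stand-ins for the macros and the magic value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uintptr_t header_t;

#define Val_hp(hp) ((void *)((header_t *)(hp) + 1))
#define Hp_val(v)  ((void *)((header_t *)(v) - 1))

int main(void)
{
  /* Allocate one hidden header word in front of the user block, as
     caml_stat_alloc does. */
  size_t sz = 32;
  void *raw = malloc(sizeof(header_t) + sz);
  if (raw == NULL) return 1;
  *(header_t *)raw = 0xDEADBEEF;         /* stand-in for the magic header */
  void *user = Val_hp(raw);

  printf("round trip ok: %d\n", Hp_val(user) == raw);
  free(Hp_val(user));                    /* free the raw pointer, not user */
  return 0;
}
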
Example No. 11
/* [allocate_block] is called by [caml_fl_allocate].  Given a suitable free
   block and the desired size, it allocates a new block from the free
   block.  There are three cases:
   0. The free block has the desired size.  Detach the block from the
      free-list and return it.
   1. The free block is 1 word longer than the desired size.  Detach
      the block from the free list.  The remaining word cannot be linked:
      turn it into an empty block (header only), and return the rest.
   2. The free block is big enough.  Split it in two and return the right
      block.
   In all cases, the allocated block is right-justified in the free block:
   it is located in the high-address words of the free block.  This way,
   the linking of the free-list does not change in case 2.
*/
static char *allocate_block (mlsize_t wh_sz, char *prev, char *cur)
{
  header_t h = Hd_bp (cur);
                                             Assert (Whsize_hd (h) >= wh_sz);
  if (Wosize_hd (h) < wh_sz + 1){                        /* Cases 0 and 1. */
    caml_fl_cur_size -= Whsize_hd (h);
    Next (prev) = Next (cur);
                    Assert (Is_in_heap (Next (prev)) || Next (prev) == NULL);
    if (caml_fl_merge == cur) caml_fl_merge = prev;
#ifdef DEBUG
    fl_last = NULL;
#endif
      /* In case 1, the following creates the empty block correctly.
         In case 0, it gives an invalid header to the block.  The function
         calling [caml_fl_allocate] will overwrite it. */
    Hd_op (cur) = Make_header (0, 0, Caml_white);
  }else{                                                        /* Case 2. */
    caml_fl_cur_size -= wh_sz;
    Hd_op (cur) = Make_header (Wosize_hd (h) - wh_sz, 0, Caml_blue);
  }
  fl_prev = prev;
  return cur + Bosize_hd (h) - Bsize_wsize (wh_sz);
}  
Example No. 12
EXTERN value alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  hp = fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) raise_out_of_memory ();
    fl_add_block (new_block);
    hp = fl_allocate (wosize);
    if (hp == NULL) fatal_error ("alloc_shr: expand heap failed\n");
  }

  Assert (Is_in_heap (Val_hp (hp)));

  if (gc_phase == Phase_mark || (addr)hp >= (addr)gc_sweep_hp){
    Hd_hp (hp) = Make_header (wosize, tag, Black);
  }else{
    Hd_hp (hp) = Make_header (wosize, tag, White);
  }
  allocated_words += Whsize_wosize (wosize);
  if (allocated_words > Wsize_bsize (minor_heap_size)) force_minor_gc ();
  return Val_hp (hp);
}
Example No. 13
/* Cut a block of memory into Max_wosize pieces, give them headers,
   and optionally merge them into the free list.
   arguments:
   p: pointer to the first word of the block
   size: size of the block (in words)
   do_merge: 1 -> do merge; 0 -> do not merge
   color: which color to give to the pieces; if [do_merge] is 1, this
          is overridden by the merge code, but we have historically used
          [Caml_white].
*/
void caml_make_free_blocks (value *p, mlsize_t size, int do_merge, int color)
{
  mlsize_t sz;

  while (size > 0){
    if (size > Whsize_wosize (Max_wosize)){
      sz = Whsize_wosize (Max_wosize);
    }else{
      sz = size;
    }
    *(header_t *)p = Make_header (Wosize_whsize (sz), 0, color);
    if (do_merge) caml_fl_merge_block (Bp_hp (p));
    size -= sz;
    p += sz;
  }
}
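
A quick worked run of the cutting loop, using a deliberately tiny stand-in for Max_wosize so the piece sizes are visible (the real constant is far larger):

#include <stdio.h>

/* Illustrative sizes only. */
#define MAX_WOSIZE          8
#define Whsize_wosize(wo)  ((wo) + 1)   /* add the header word */
#define Wosize_whsize(wh)  ((wh) - 1)

int main(void)
{
  /* Cut a 25-word region the way caml_make_free_blocks does:
     full-size pieces first, then whatever remains. */
  long size = 25, sz;
  while (size > 0) {
    sz = size > Whsize_wosize(MAX_WOSIZE) ? Whsize_wosize(MAX_WOSIZE) : size;
    printf("piece: %ld words total, wosize %ld\n", sz, Wosize_whsize(sz));
    size -= sz;
  }
  return 0;
}
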
Example No. 14
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size (in words).
   The caller must insert the block into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem;
  asize_t malloc_request;

  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  Assert (Wosize_bhsize (malloc_request) >= request);
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Caml_blue);

  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
Example No. 15
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize, tag_t tag, int pinned) {
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    p = pool_allocate(local, sizeclass_wsize[whsize]);
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
  }
  if (!p) return 0;
  Hd_hp (p) = Make_header(wosize, tag, pinned ? NOT_MARKABLE : global.UNMARKED);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
Example No. 16
static void init_atoms(void)
{
  extern struct segment caml_data_segments[], caml_code_segments[];
  int i;
  struct code_fragment * cf;

  for (i = 0; i < 256; i++) {
    caml_atom_table[i] = Make_header(0, i, Caml_white);
  }
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0)
    caml_fatal_error("Fatal error: not enough memory for initial page table");

  for (i = 0; caml_data_segments[i].begin != 0; i++) {
    /* PR#5509: we must include the zero word at end of data segment,
       because pointers equal to caml_data_segments[i].end are static data. */
    if (caml_page_table_add(In_static_data,
                            caml_data_segments[i].begin,
                            caml_data_segments[i].end + sizeof(value)) != 0)
      caml_fatal_error("Fatal error: not enough memory for initial page table");
  }

  caml_code_area_start = caml_code_segments[0].begin;
  caml_code_area_end = caml_code_segments[0].end;
  for (i = 1; caml_code_segments[i].begin != 0; i++) {
    if (caml_code_segments[i].begin < caml_code_area_start)
      caml_code_area_start = caml_code_segments[i].begin;
    if (caml_code_segments[i].end > caml_code_area_end)
      caml_code_area_end = caml_code_segments[i].end;
  }
  /* Register the code in the table of code fragments */
  cf = caml_stat_alloc(sizeof(struct code_fragment));
  cf->code_start = caml_code_area_start;
  cf->code_end = caml_code_area_end;
  cf->digest_computed = 0;
  caml_ext_table_init(&caml_code_fragments_table, 8);
  caml_ext_table_add(&caml_code_fragments_table, cf);
}
Example No. 17
void adjust_pointers(value * start, mlsize_t size, color_t color)
{
  value * p, * q;
  mlsize_t sz;
  header_t hd;
  tag_t tag;
  value v;
  mlsize_t bosize;

  p = start;
  q = p + size;
  bosize = Bsize_wsize(size);
  while (p < q) {
    hd = *p;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    *p++ = Make_header(sz, tag, color);
    if (tag >= No_scan_tag)
      p += sz;
    else
      for( ; sz > 0; sz--, p++) {
        v = *p;
        switch(v & 3) {
        case 0:                 /* 0 -> A block represented by its offset. */
          assert(v >= 0 && v <= bosize && (v & 3) == 0);
          *p = (value) ((byteoffset_t) start + v);
          break;
        case 2:                 /* 2 -> An atom. */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *p = Atom(v);
          break;
        default:                /* 1 or 3 -> An integer. */
          break;
        }
      }
  }
}
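
The v & 3 dispatch reads a relocation encoding: word-aligned byte offsets denote blocks (low bits 00), atoms are stored as tag << 2 | 2, and tagged integers keep their low bit set. A sketch of encoding and decoding under those assumptions:

#include <stdint.h>
#include <stdio.h>

typedef intptr_t value;

/* Encoding read off the switch above: 00 -> block given by its byte
   offset (word-aligned); 10 -> atom, tag in the upper bits; low bit
   1 -> tagged integer, already position-independent. */
#define Enc_block(ofs)  ((value)(ofs))
#define Enc_atom(tag)   (((value)(tag) << 2) | 2)
#define Enc_int(n)      ((value)(n) * 2 + 1)

int main(void)
{
  value vs[3] = { Enc_block(16), Enc_atom(5), Enc_int(-7) };
  int i;
  for (i = 0; i < 3; i++) {
    switch (vs[i] & 3) {
    case 0:  printf("block at offset %ld\n", (long)vs[i]);        break;
    case 2:  printf("atom with tag %ld\n", (long)(vs[i] >> 2));   break;
    default: printf("integer %ld\n", (long)((vs[i] - 1) / 2));    break;
    }
  }
  return 0;
}
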
Example No. 18
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
                                          Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{                                      Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }


  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }


  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;                             Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }                    Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{                                        Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:       Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header:
             the object was free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }


  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)), 1,
                                 Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
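
The header passes hinge on pointer inversion: during pass 2 a block's header slot becomes the head of a linked list of all the fields that pointed at the block, terminated by the encoded header (Ecolor 3), and pass 3 unwinds that list once the new address is known. A minimal standalone model of one block with two incoming pointers:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t word;
#define Ecolor(w) ((w) & 3)

int main(void)
{
  /* One block: its header slot (encoded with low bits 3 by pass 1)
     and two fields elsewhere that point at it. */
  word hdr_slot = ((word)42 << 2) | 3;
  word field_a = 0, field_b = 0;

  /* Pass 2 (inversion): each pointing field is pushed onto a list
     rooted at the header slot; the encoded header becomes the tail.
     Field addresses are word-aligned, so their Ecolor is 0. */
  field_a = hdr_slot;  hdr_slot = (word)&field_a;
  field_b = hdr_slot;  hdr_slot = (word)&field_b;

  /* Pass 3: chase Ecolor==0 links, rewriting each field with the
     block's new address; the chase ends at the encoded header. */
  word newadr = 0x1000, q = hdr_slot;
  while (Ecolor(q) == 0) {
    word next = *(word *)q;
    *(word *)q = newadr;
    q = next;
  }
  printf("decoded size=%lu field_a=%#lx field_b=%#lx\n",
         (unsigned long)(q >> 2), (unsigned long)field_a,
         (unsigned long)field_b);
  return 0;
}
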
Example No. 19
/* [caml_fl_merge_block] returns the head pointer of the next block after [bp],
   because merging blocks may change the size of [bp]. */
char *caml_fl_merge_block (char *bp)
{
  char *prev, *cur, *adj;
  header_t hd = Hd_bp (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_size += Whsize_hd (hd);

#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == NULL);

  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_bp (bp);
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = last_fragment;
      Hd_bp (bp) = hd;
      caml_fl_cur_size += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = bp + Bosize_hd (hd);
  if (adj == Hp_bp (cur)){
    char *next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_bp (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_bp (bp) = hd;
      adj = bp + Bosize_hd (hd);
#ifdef DEBUG
      fl_last = NULL;
      Next (cur) = (char *) Debug_free_major;
      Hd_bp (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }
  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_bp (prev);
  if (prev + Bsize_wsize (prev_wosz) == Hp_bp (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    Hd_bp (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0,Caml_blue);
#ifdef DEBUG
    Hd_bp (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    Hd_bp (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = bp;
    caml_fl_cur_size -= Whsize_wosize (0);
  }
  return adj;
}
Example No. 20
/* The free-list is kept sorted by increasing addresses.
   This makes the merging of adjacent free blocks possible.
   (See [caml_fl_merge_block].)
*/

typedef struct {
  char *next_bp;   /* Pointer to the first byte of the next block. */
} block;

/* The sentinel can be located anywhere in memory, but it must not be
   adjacent to any heap object. */
static struct {
  value filler1; /* Make sure the sentinel is never adjacent to any block. */
  header_t h;
  value first_bp;
  value filler2; /* Make sure the sentinel is never adjacent to any block. */
} sentinel = {0, Make_header (0, 0, Caml_blue), 0, 0};

#define Fl_head ((char *) (&(sentinel.first_bp)))
static char *fl_prev = Fl_head;  /* Current allocation pointer. */
static char *fl_last = NULL;     /* Last block in the list.  Only valid
                                 just after [caml_fl_allocate] returns NULL. */
char *caml_fl_merge = Fl_head;   /* Current insertion pointer.  Managed
                                    jointly with [sweep_slice]. */
asize_t caml_fl_cur_size = 0;    /* Number of words in the free list,
                                    including headers but not fragments. */

#define FLP_MAX 1000
static char *flp [FLP_MAX];
static int flp_size = 0;
static char *beyond = NULL;
Example No. 21
void caml_init_exceptions(void)
{
  out_of_memory_bucket.hdr = Make_header(1, 0, Caml_white);
  out_of_memory_bucket.exn = Field(caml_global_data, OUT_OF_MEMORY_EXN);
  caml_register_global_root(&out_of_memory_bucket.exn);
}
Example No. 22
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
	char *mem;
	char *new_page_table = NULL;
	size_t new_page_table_size = 0;
	size_t malloc_request;
	size_t i, more_pages;

	malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
	gc_message ("Growing heap to %ldk\n",
		    (stat_heap_size + malloc_request) / 1024);
	mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
			      sizeof (heap_chunk_head));
	if (mem == NULL){
		gc_message ("No room for growing heap\n", 0);
		return NULL;
	}

	mem += sizeof (heap_chunk_head);
	(((heap_chunk_head *) mem) [-1]).size = malloc_request;
	assert (Wosize_bhsize (malloc_request) >= request);
	Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

	/* The else-if check can never go negative: in that branch we have
	 * mem >= heap_start, so the Page calculation is positive and the
	 * (unsigned) cast is valid.
	 */
	if (mem < heap_start) {
		more_pages = -Page (mem);
	} else if ((unsigned) Page(mem + malloc_request) > page_table_size) {
		assert (mem >= heap_end);
		more_pages = Page (mem + malloc_request) - page_table_size;
	} else {
		more_pages = 0;
	}

	if (more_pages != 0) {
		new_page_table_size = page_table_size + more_pages;
		new_page_table = (char *) malloc (new_page_table_size);
		if (new_page_table == NULL){
			gc_message ("No room for growing page table\n", 0);
			free (mem);
			return NULL;
		}
	}

	if (mem < heap_start) {
		assert (more_pages != 0);
		for (i = 0; i < more_pages; i++){
			new_page_table [i] = Not_in_heap;
		}
		bcopy (page_table, new_page_table + more_pages, page_table_size);
		(((heap_chunk_head *) mem) [-1]).next = heap_start;
		heap_start = mem;
	} else {
		char **last;
		char *cur;

		if (mem >= heap_end) heap_end = mem + malloc_request;
		if (more_pages != 0) {
			for (i = page_table_size; i < new_page_table_size; i++) {
				new_page_table [i] = Not_in_heap;
			}
			bcopy (page_table, new_page_table, page_table_size);
		}
		last = &heap_start;
		cur = *last;
		while (cur != NULL && cur < mem){
			last = &((((heap_chunk_head *) cur) [-1]).next);
			cur = *last;
		}
		(((heap_chunk_head *) mem) [-1]).next = cur;
		*last = mem;
	}

	if (more_pages != 0) {
		free (page_table);
		page_table = new_page_table;
		page_table_size = new_page_table_size;
	}

	for (i = Page (mem); i < (unsigned) Page (mem + malloc_request); i++){
		page_table [i] = In_heap;
	}
	stat_heap_size += malloc_request;
	return Bp_hp (mem);
}
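
The page table here is a plain byte array indexed by page number. A sketch of the address-to-page mapping the loops assume; the 4 KiB page size is an illustrative assumption:

#include <stdint.h>
#include <stdio.h>

#define Page_log 12                            /* assumed: 4 KiB pages */
#define Page(p)  ((uintptr_t)(p) >> Page_log)

enum { Not_in_heap = 0, In_heap = 1 };

int main(void)
{
  static unsigned char page_table[256];
  /* Mark the pages spanned by a page-aligned chunk, as the final
     loop of expand_heap does. */
  uintptr_t mem = 0x3000, malloc_request = 0x2000, i;
  for (i = Page(mem); i < Page(mem + malloc_request); i++)
    page_table[i] = In_heap;
  printf("pages %lu..%lu marked In_heap\n",
         (unsigned long)Page(mem),
         (unsigned long)(Page(mem + malloc_request) - 1));
  return 0;
}
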
Example No. 23
/* The free-list is kept sorted by increasing addresses.
   This makes the merging of adjacent free blocks possible.
   (See [caml_fl_merge_block].)
*/

/* A free list block is a [value] (integer representing a pointer to the
   first word after the block's header). The end of the list is NULL. */
#define Val_NULL ((value) NULL)

/* The sentinel can be located anywhere in memory, but it must not be
   adjacent to any heap object. */
static struct {
  value filler1; /* Make sure the sentinel is never adjacent to any block. */
  header_t h;
  value first_field;
  value filler2; /* Make sure the sentinel is never adjacent to any block. */
} sentinel = {0, Make_header (0, 0, Caml_blue), Val_NULL, 0};

#define Fl_head (Val_bp (&(sentinel.first_field)))
static value fl_prev = Fl_head;  /* Current allocation pointer. */
static value fl_last = Val_NULL; /* Last block in the list.  Only valid
                                  just after [caml_fl_allocate] returns NULL. */
value caml_fl_merge = Fl_head;   /* Current insertion pointer.  Managed
                                    jointly with [sweep_slice]. */
asize_t caml_fl_cur_wsz = 0;     /* Number of words in the free list,
                                    including headers but not fragments. */

#define FLP_MAX 1000
static value flp [FLP_MAX];
static int flp_size = 0;
static value beyond = Val_NULL;
Example No. 24
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem;
  char *new_page_table = NULL;
  asize_t new_page_table_size = 0;
  asize_t malloc_request;
  asize_t i, more_pages;

  malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
  gc_message ("Growing heap to %ldk\n",
	      (stat_heap_size + malloc_request) / 1024);
  mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
                        sizeof (heap_chunk_head));
  if (mem == NULL){
    gc_message ("No room for growing heap\n", 0);
    return NULL;
  }
  mem += sizeof (heap_chunk_head);
  (((heap_chunk_head *) mem) [-1]).size = malloc_request;
  Assert (Wosize_bhsize (malloc_request) >= request);
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

#ifndef SIXTEEN
  if (mem < heap_start){
    /* This is WRONG, Henning Niss 2005: */
    more_pages = -Page (mem);
  }else if (Page (mem + malloc_request) > page_table_size){
    Assert (mem >= heap_end);
    more_pages = Page (mem + malloc_request) - page_table_size;
  }else{
    more_pages = 0;
  }

  if (more_pages != 0){
    new_page_table_size = page_table_size + more_pages;
    new_page_table = (char *) malloc (new_page_table_size);
    if (new_page_table == NULL){
      gc_message ("No room for growing page table\n", 0);
      free (mem);
      return NULL;
    }
  }

  if (mem < heap_start){
    Assert (more_pages != 0);
    for (i = 0; i < more_pages; i++){
      new_page_table [i] = Not_in_heap;
    }
    bcopy (page_table, new_page_table + more_pages, page_table_size);
    (((heap_chunk_head *) mem) [-1]).next = heap_start;
    heap_start = mem;
  }else{
    char **last;
    char *cur;

    if (mem >= heap_end) heap_end = mem + malloc_request;
    if (more_pages != 0){
      for (i = page_table_size; i < new_page_table_size; i++){
        new_page_table [i] = Not_in_heap;
      }
      bcopy (page_table, new_page_table, page_table_size);
    }
    last = &heap_start;
    cur = *last;
    while (cur != NULL && cur < mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }

  if (more_pages != 0){
    free (page_table);
    page_table = new_page_table;
    page_table_size = new_page_table_size;
  }
#else                           /* Simplified version for the 8086 */
  {
    char **last;
    char *cur;

    last = &heap_start;
    cur = *last;
    while (cur != NULL && (char huge *) cur < (char huge *) mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }
#endif

  for (i = Page (mem); i < Page (mem + malloc_request); i++){
    page_table [i] = In_heap;
  }
  stat_heap_size += malloc_request;
  return Bp_hp (mem);
}
Example No. 25
static void init_atoms(void)
{
  int i;
  for(i = 0; i < 256; i++) caml_atom_table[i] = Make_header(0, i, Caml_white);
}
Example No. 26
static int shrink_block(value64 * source, value * dest, mlsize_t source_len, mlsize_t dest_len, color_t color)
{
  value64 * p, * q;
  value * d, * e;
  header_t hd;
  mlsize_t sz;
  tag_t tag;
  byteoffset_t * forward_addr;
  byteoffset_t dest_ofs;
  value v;

  /* First pass: copy the objects and set up forwarding pointers.
     The pointers contained inside blocks are not resolved. */

  for (p = source, q = source + source_len, d = dest; p < q; /*nothing*/) {
    hd = (header_t)(p->lsw);
    p++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    forward_addr = (byteoffset_t *) p;
    dest_ofs = d + 1 - dest;
    switch(tag) {
    case String_tag:
      { mlsize_t ofs_last_byte, len, new_sz;
        ofs_last_byte = sz * sizeof(value64) - 1;
        len = ofs_last_byte - Byte(p, ofs_last_byte);
        new_sz = (len + sizeof(value)) / sizeof(value);
        *d++ = Make_header(new_sz, String_tag, color);
        Field(d, new_sz - 1) = 0;
        bcopy(p, d, len);
        ofs_last_byte = new_sz * sizeof(value) - 1;
        Byte(d, ofs_last_byte) = ofs_last_byte - len;
        p += sz;
        d += new_sz;
        break;
      }
    case Double_tag:
      *d++ = Make_header(Double_wosize, Double_tag, color);
      Store_double_val((value)d, Double_val((value)p));
      p += sizeof(double) / sizeof(value64);
      d += sizeof(double) / sizeof(value);
      break;
    default:
      *d++ = Make_header(sz, tag, color);
      for (/*nothing*/; sz > 0; sz--, p++, d++) {
        value lsw = p->lsw;
        value msw = p->msw;
        if ((lsw & 1) == 0) {      /* If relative displacement: */
          if (msw != 0) return -1; /* Check unsigned displacement fits in 32 */
        } else {                   /* Otherwise, it's a signed integer */
          if ((lsw >= 0 && msw != 0) || (lsw < 0 && msw != -1)) return -1;
        }
        *d = lsw;
      }
    }
    *forward_addr = dest_ofs;   /* store the forwarding pointer */
  }
  assert(d == dest + dest_len);

  /* Second pass: resolve pointers contained inside blocks,
     replacing them by the corresponding forwarding pointer. */

  for (d = dest, e = dest + dest_len; d < e; /*nothing*/) {
    hd = (header_t) *d++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    if (tag >= No_scan_tag) {
      d += sz;
    } else {
      for (/*nothing*/; sz > 0; sz--, d++) {
        v = *d;
        switch(v & 3) {
        case 0:                 /* 0: a block represented by its offset */
          assert(v >= 0 && v < source_len * sizeof(value64) && (v & 7) == 0);
          *d = (value) (dest + *((byteoffset_t *)((char *) source + v)));
          break;
        case 2:                 /* 2: an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *d = Atom(v);
          break;
        default:                /* 1 or 3: an integer */
          break;
        }
      }
    }
  }
  return 0;
}
Example No. 27
static void init_atoms(void)
{
  int i;
  for(i = 0; i < 256; i++) first_atoms[i] = Make_header(0, i, White);
}
Example No. 28
static void expand_block(value32 * source, value * dest, mlsize_t source_len, mlsize_t dest_len, color_t color)
{
  value32 * p, * q;
  value * d, * e;
  header_t hd;
  mlsize_t sz;
  tag_t tag;
  uint32_t * forward_addr;
  uint32_t dest_ofs;
  value v;

  /* First pass: copy the objects and set up forwarding pointers.
     The pointers contained inside blocks are not resolved. */

  for (p = source, q = source + source_len, d = dest; p < q; /*nothing*/) {
    hd = (header_t) *p++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    forward_addr = (uint32_t *) p;
    dest_ofs = d + 1 - dest;
    switch(tag) {
    case String_tag:
      { mlsize_t ofs_last_byte, len, new_sz;
        ofs_last_byte = sz * sizeof(value32) - 1;
        len = ofs_last_byte - Byte(p, ofs_last_byte);
        new_sz = (sz * sizeof(value32) + sizeof(value) - 1) / sizeof(value);
        *d++ = Make_header(new_sz, String_tag, color);
        Field(d, new_sz - 1) = 0;
        bcopy((char *)p, (char *)d, len);
        ofs_last_byte = new_sz * sizeof(value) - 1;
        Byte(d, ofs_last_byte) = ofs_last_byte - len;
        p += sz;
        d += new_sz;
        break;
      }
    case Double_tag:
      *d++ = Make_header(Double_wosize, Double_tag, color);
      /* Cannot do *((double *) d) = *((double *) p) directly
         because p might not be 64-aligned. */
      assert(sizeof(double) == sizeof(value));
      ((value32 *) d)[0] = p[0];
      ((value32 *) d)[1] = p[1];
      p += sizeof(double) / sizeof(value32);
      d += 1;
      break;
    default:
      *d++ = Make_header(sz, tag, color);
      for (/*nothing*/; sz > 0; sz--, p++, d++) {
        if ((*p & 1) == 0) {
          *d = *((uint32_t *) p);         /* copy, zero expansion */
        } else {
          *d = *((int32_t *) p);          /* copy, sign expansion */
        }
      }
      break;
    }
    *forward_addr = dest_ofs;   /* store the forwarding pointer */
  }
  assert(d == dest + dest_len);

  /* Second pass: resolve pointers contained inside blocks,
     replacing them by the corresponding forwarding pointer. */

  for (d = dest, e = dest + dest_len; d < e; /*nothing*/) {
    hd = (header_t) *d++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    if (tag >= No_scan_tag) {
      d += sz;
    } else {
      for (/*nothing*/; sz > 0; sz--, d++) {
        v = *d;
        switch(v & 3) {
        case 0:                 /* 0: a block represented by its offset */
          assert(v >= 0 && v < source_len * sizeof(value32) && (v & 3) == 0);
          *d = (value) (dest + *((uint32_t *)((char *) source + v)));
          break;
        case 2:                 /* 2: an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *d = Atom(v);
          break;
        default:                /* 1 or 3: an integer */
          break;
        }
      }
    }
  }
}
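
The default branch widens each 32-bit word to the native word size: even raw bits are an offset or pointer and are zero-extended, odd raw bits are a tagged integer and are sign-extended. A self-contained sketch of that rule:

#include <stdint.h>
#include <stdio.h>

typedef intptr_t value;

/* Widen one 32-bit heap word the way the default branch above does:
   even -> pointer/offset, zero-extend; odd -> tagged integer,
   sign-extend so negative ints survive. */
static value widen(uint32_t w)
{
  if ((w & 1) == 0)
    return (value)w;              /* copy, zero expansion */
  else
    return (value)(int32_t)w;     /* copy, sign expansion */
}

int main(void)
{
  uint32_t ptr_like = 0x80000010u;      /* even: offset-like word */
  uint32_t int_like = (uint32_t)-9;     /* odd: tagged -5 (2*(-5)+1) */
  printf("zero-extended: %#lx\n", (unsigned long)widen(ptr_like));
  printf("sign-extended tagged int: %ld\n",
         (long)((widen(int_like) - 1) / 2));
  return 0;
}
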
Example No. 29
/* The free-list is kept sorted by increasing addresses.
   This makes the merging of adjacent free blocks possible.
   (See [fl_merge_block].)
*/

typedef struct {
  char *next_bp;   /* Pointer to the first byte of the next block. */
} block;

/* The sentinel can be located anywhere in memory, but it must not be
   adjacent to any heap object. */
static struct {
  value filler1; /* Make sure the sentinel is never adjacent to any block. */
  header_t h;
  value first_bp;
  value filler2; /* Make sure the sentinel is never adjacent to any block. */
} sentinel = {0, Make_header (0, 0, Blue), 0, 0};

#define Fl_head ((char *) (&(sentinel.first_bp)))
static char *fl_prev = Fl_head;  /* Current allocation pointer. */
static char *fl_last = NULL;     /* Last block in the list.  Only valid
                                    just after fl_allocate returned NULL. */
char *fl_merge = Fl_head;        /* Current insertion pointer.  Managed
                                    jointly with [sweep_slice]. */

#define Next(b) (((block *) (b))->next_bp)

#ifdef DEBUG
void fl_check ()
{
  char *cur, *prev;
  int prev_found = 0, merge_found = 0;