Example #1
static void init_atoms(void)
{
  extern struct segment caml_data_segments[], caml_code_segments[];
  int i;

  for (i = 0; i < 256; i++) {
    caml_atom_table[i] = Make_header(0, i, Caml_white);
  }
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0)
    caml_fatal_error("Fatal error: not enough memory for the initial page table");

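  /* Register every static data segment; the segment array is
     terminated by an entry whose [begin] pointer is NULL. */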
  for (i = 0; caml_data_segments[i].begin != 0; i++) {
    if (caml_page_table_add(In_static_data,
                            caml_data_segments[i].begin,
                            caml_data_segments[i].end) != 0)
      caml_fatal_error("Fatal error: not enough memory for the initial page table");
  }

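  /* Compute the smallest range [caml_code_area_start,
     caml_code_area_end) that covers every code segment. */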
  caml_code_area_start = caml_code_segments[0].begin;
  caml_code_area_end = caml_code_segments[0].end;
  for (i = 1; caml_code_segments[i].begin != 0; i++) {
    if (caml_code_segments[i].begin < caml_code_area_start)
      caml_code_area_start = caml_code_segments[i].begin;
    if (caml_code_segments[i].end > caml_code_area_end)
      caml_code_area_end = caml_code_segments[i].end;
  }
}
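For context, here is a hedged sketch of how these page-table classifications are typically consumed elsewhere in the runtime; the exact macro definitions are an assumption about this runtime version:

/* Hedged sketch of the classic page-table query macros: a lookup
   returns a bitmask of In_heap / In_static_data / In_young flags. */
#define Classify_addr(a)     (caml_page_table_lookup((void *)(a)))
#define Is_in_heap(a)        (Classify_addr(a) & In_heap)
#define Is_in_value_area(a)  \
  (Classify_addr(a) & (In_heap | In_static_data | In_young))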
Example #2
CAMLprim value caml_natdynlink_run(void *handle, value symbol) {
  CAMLparam1 (symbol);
  CAMLlocal1 (result);
  void *sym, *sym2;

#define optsym(n) getsym(handle,unit,n)
  char *unit;
  void (*entrypoint)(void);

  unit = String_val(symbol);

  sym = optsym("__frametable");
  if (NULL != sym) caml_register_frametable(sym);

  sym = optsym("");
  if (NULL != sym) caml_register_dyn_global(sym);

  sym = optsym("__data_begin");
  sym2 = optsym("__data_end");
  if (NULL != sym && NULL != sym2)
    caml_page_table_add(In_static_data, sym, sym2);

  sym = optsym("__code_begin");
  sym2 = optsym("__code_end");
  if (NULL != sym && NULL != sym2)
    caml_page_table_add(In_code_area, sym, sym2);

  entrypoint = optsym("__entry");
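  /* Note: an OCaml closure is a pointer to a block whose first word
     is a code pointer, so passing &entrypoint lets caml_callback use
     the C variable itself as a minimal closure. */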
  if (NULL != entrypoint) result = caml_callback((value)(&entrypoint), 0);
  else result = Val_unit;

#undef optsym

  CAMLreturn (result);
}
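The getsym helper behind optsym is not shown in this example. A plausible definition, hedged as an assumption about this runtime version, mangles the unit name with a "caml" prefix and resolves it through the runtime's dlsym wrapper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch of getsym: build "caml<unit><suffix>"
   (e.g. "camlFoo__entry") and look it up in the shared object;
   caml_dlsym is the runtime's dlsym wrapper. */
static void *getsym(void *handle, char *unit, char *name)
{
  char *fullname = malloc(strlen(unit) + strlen(name) + 5);
  void *sym;
  if (fullname == NULL) return NULL;
  sprintf(fullname, "caml%s%s", unit, name);
  sym = caml_dlsym(handle, fullname);
  free(fullname);
  return sym;
}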
Example #3
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;
  void *new_heap_base;

  Assert (size >= Minor_heap_min);
  Assert (size <= Minor_heap_max);
  Assert (size % sizeof (value) == 0);
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);
  new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  if (new_heap == NULL) caml_raise_out_of_memory();
  if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
    caml_raise_out_of_memory();

  if (caml_young_start != NULL){
    caml_page_table_remove(In_young, caml_young_start, caml_young_end);
    free (caml_young_base);
  }
  caml_young_base = new_heap_base;
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;
  caml_minor_heap_size = size;

  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
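The function keeps two pointers because caml_aligned_malloc returns an aligned pointer distinct from the raw allocation; only the raw base may later be passed to free. A minimal sketch of that pattern, with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical sketch of the over-allocate-and-align pattern behind
   caml_aligned_malloc: return the aligned pointer, store the raw
   malloc result through [base] so the caller can free it.  [align]
   must be a power of two. */
static char *aligned_alloc_sketch(size_t size, size_t align, void **base)
{
  char *raw = malloc(size + align);   /* slack for rounding up */
  if (raw == NULL) return NULL;
  *base = raw;                        /* this is what free() gets */
  return (char *) (((uintptr_t) raw + align - 1) & ~(uintptr_t) (align - 1));
}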
Example #4
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(m)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.

   See also: caml_compact_heap, which duplicates most of this function.
*/
int caml_add_to_heap (char *m)
{
  Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif /* debug */

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (caml_stat_heap_size + Chunk_size (m)) / 1024);

  /* Register block in page table */
  if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
    return -1;

  /* Chain this heap chunk. */
  {
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  caml_stat_heap_size += Chunk_size (m);
  if (caml_stat_heap_size > caml_stat_top_heap_size){
    caml_stat_top_heap_size = caml_stat_heap_size;
  }
  return 0;
}
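A hedged sketch of the usual calling sequence around caml_add_to_heap, mirroring the runtime's heap-expansion path (helper names and the exact caml_make_free_blocks signature vary between versions):

/* Hedged sketch: allocate a fresh chunk, format it as free blocks to
   satisfy caml_add_to_heap's contract ("a sequence of valid blocks"),
   then chain it in, releasing the chunk if chaining fails.  The final
   argument to caml_make_free_blocks is read here as a do_merge flag
   asking for the blocks to be put on the free list. */
static int grow_heap_sketch(asize_t request)
{
  char *chunk = caml_alloc_for_heap(caml_round_heap_chunk_size(request));
  if (chunk == NULL) return -1;
  caml_make_free_blocks((value *) chunk, Wsize_bsize(Chunk_size(chunk)), 1);
  if (caml_add_to_heap(chunk) != 0) {
    caml_free_for_heap(chunk);
    return -1;
  }
  return 0;
}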
Example #5
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;
  void *new_heap_base;

  Assert (size >= Minor_heap_min);
  Assert (size <= Minor_heap_max);
  Assert (size % sizeof (value) == 0);
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);
  new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  if (new_heap == NULL) caml_raise_out_of_memory();
  if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
    caml_raise_out_of_memory();

  if (caml_young_start != NULL){
#ifdef SYS_xen
/* XXX temporary until memory allocator works properly */
    printk("caml_set_minor_heap_size: resize unsupported\n");
    caml_raise_out_of_memory();
#else
    caml_page_table_remove(In_young, caml_young_start, caml_young_end);
    free (caml_young_base);
#endif
  }
  caml_young_base = new_heap_base;
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;
  caml_minor_heap_size = size;

  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
Example #6
static void init_atoms(void)
{
  int i;
  for(i = 0; i < 256; i++) caml_atom_table[i] = Make_header(0, i, Caml_white);
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0) {
    caml_fatal_error("Fatal error: not enough memory for the initial page table");
  }
}
Example #7
void caml_compact_heap (void)
{
    uintnat target_size, live;

    do_compaction ();
    /* Compaction may fail to shrink the heap to a reasonable size
       because it deals in complete chunks: if a very large chunk
       is at the beginning of the heap, everything gets moved to
       it and it is not freed.

       In that case, we allocate a new chunk of the desired heap
       size, chain it at the beginning of the heap (thus pretending
       its address is smaller), and launch a second compaction.
       This will move all data to this new chunk and free the
       very large chunk.

       See PR#5389
    */
    /* We compute:
       freewords = caml_fl_cur_size                 (exact, in words)
       heapsize = caml_stat_heap_size               (exact, in bytes)
       live = heapsize - Bsize_wsize (freewords)
       target_size = live * (1 + caml_percent_free / 100)
                   = live / 100 * (100 + caml_percent_free)
       We add 1 to live/100 to make sure it isn't 0.

       We recompact if target_size < heapsize / 2
    */
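    /* Worked example (hypothetical numbers): with heapsize = 100 MB
       and 80 MB of it free, live = 20 MB.  Taking caml_percent_free
       to be 80 gives target_size ~= (20 MB / 100) * 180 = 36 MB,
       which is below heapsize / 2 = 50 MB, so we recompact. */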
    live = caml_stat_heap_size - Bsize_wsize (caml_fl_cur_size);
    target_size = (live / 100 + 1) * (100 + caml_percent_free);
    target_size = caml_round_heap_chunk_size (target_size);
    if (target_size < caml_stat_heap_size / 2) {
        char *chunk;

        /* round it up to a page size */
        chunk = caml_alloc_for_heap (target_size);
        if (chunk == NULL) return;
        caml_make_free_blocks ((value *) chunk,
                               Wsize_bsize (Chunk_size (chunk)), 0);
        if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0) {
            caml_free_for_heap (chunk);
            return;
        }
        Chunk_next (chunk) = caml_heap_start;
        caml_heap_start = chunk;
        ++ caml_stat_heap_chunks;
        caml_stat_heap_size += Chunk_size (chunk);
        if (caml_stat_heap_size > caml_stat_top_heap_size) {
            caml_stat_top_heap_size = caml_stat_heap_size;
        }
        do_compaction ();
        Assert (caml_stat_heap_chunks == 1);
        Assert (Chunk_next (caml_heap_start) == NULL);
    }
}
Example #8
CAMLprim value caml_natdynlink_run(void *handle, value symbol) {
  CAMLparam1 (symbol);
  CAMLlocal1 (result);
  void *sym, *sym2;
  struct code_fragment * cf;

#define optsym(n) getsym(handle,unit,n)
  char *unit;
  void (*entrypoint)(void);

  unit = String_val(symbol);

  sym = optsym("__frametable");
  if (NULL != sym) caml_register_frametable(sym);

  sym = optsym("");
  if (NULL != sym) caml_register_dyn_global(sym);

  sym = optsym("__data_begin");
  sym2 = optsym("__data_end");
  if (NULL != sym && NULL != sym2)
    caml_page_table_add(In_static_data, sym, sym2);

  sym = optsym("__code_begin");
  sym2 = optsym("__code_end");
  if (NULL != sym && NULL != sym2) {
    caml_page_table_add(In_code_area, sym, sym2);
    cf = caml_stat_alloc(sizeof(struct code_fragment));
    cf->code_start = (char *) sym;
    cf->code_end = (char *) sym2;
    cf->digest_computed = 0;
    caml_ext_table_add(&caml_code_fragments_table, cf);
  }

  entrypoint = optsym("__entry");
  if (NULL != entrypoint) result = caml_callback((value)(&entrypoint), 0);
  else result = Val_unit;

#undef optsym

  CAMLreturn (result);
}
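Compared with Example #2, this version also records the new code range in caml_code_fragments_table so marshalled closures can later be matched to their code. A plausible layout for struct code_fragment, hedged as an assumption about this runtime version:

/* Hedged sketch of struct code_fragment: the digest (an MD5 of the
   code range, computed lazily) identifies the fragment when closures
   are marshalled. */
struct code_fragment {
  char *code_start;
  char *code_end;
  unsigned char digest[16];
  char digest_computed;     /* 0 until [digest] has been filled in */
};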
Example #9
static void init_atoms(void)
{
  extern struct segment caml_data_segments[], caml_code_segments[];
  int i;
  struct code_fragment * cf;

  for (i = 0; i < 256; i++) {
    caml_atom_table[i] = Make_header(0, i, Caml_white);
  }
  if (caml_page_table_add(In_static_data,
                          caml_atom_table, caml_atom_table + 256) != 0)
    caml_fatal_error("Fatal error: not enough memory for initial page table");

  for (i = 0; caml_data_segments[i].begin != 0; i++) {
    /* PR#5509: we must include the zero word at end of data segment,
       because pointers equal to caml_data_segments[i].end are static data. */
    if (caml_page_table_add(In_static_data,
                            caml_data_segments[i].begin,
                            caml_data_segments[i].end + sizeof(value)) != 0)
      caml_fatal_error("Fatal error: not enough memory for initial page table");
  }

  caml_code_area_start = caml_code_segments[0].begin;
  caml_code_area_end = caml_code_segments[0].end;
  for (i = 1; caml_code_segments[i].begin != 0; i++) {
    if (caml_code_segments[i].begin < caml_code_area_start)
      caml_code_area_start = caml_code_segments[i].begin;
    if (caml_code_segments[i].end > caml_code_area_end)
      caml_code_area_end = caml_code_segments[i].end;
  }
  /* Register the code in the table of code fragments */
  cf = caml_stat_alloc(sizeof(struct code_fragment));
  cf->code_start = caml_code_area_start;
  cf->code_end = caml_code_area_end;
  cf->digest_computed = 0;
  caml_ext_table_init(&caml_code_fragments_table, 8);
  caml_ext_table_add(&caml_code_fragments_table, cf);
}
Example #10
CAMLprim value netsys_value_area_add(value memv) 
{
#ifdef FANCY_PAGE_TABLES
    struct caml_bigarray *b = Bigarray_val(memv);
    int code;
    code = caml_page_table_add(In_static_data,
                               b->data,
                               b->data + b->dim[0]);
    if (code != 0)
        failwith("Netsys_mem.value_area: error");
    return Val_unit;
#else
    invalid_argument("Netsys_mem.value_area");
    return Val_unit;   /* not reached: invalid_argument raises */
#endif
}
Example #11
void caml_array_bound_error(void)
{
  if (! array_bound_error_bucket_inited) {
    mlsize_t wosize = (BOUND_MSG_LEN + sizeof(value)) / sizeof(value);
    mlsize_t offset_index = Bsize_wsize(wosize) - 1;
    array_bound_error_msg.hdr = Make_header(wosize, String_tag, Caml_white);
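    /* OCaml strings keep their padding count in the final byte:
       length = offset_index - data[offset_index], so storing
       offset_index - BOUND_MSG_LEN there makes the string length
       come out as exactly BOUND_MSG_LEN. */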
    array_bound_error_msg.data[offset_index] = offset_index - BOUND_MSG_LEN;
    array_bound_error_bucket.hdr = Make_header(2, 0, Caml_white);
    array_bound_error_bucket.exn = (value) caml_exn_Invalid_argument;
    array_bound_error_bucket.arg = (value) array_bound_error_msg.data;
    caml_page_table_add(In_static_data,
                        &array_bound_error_msg,
                        &array_bound_error_msg + 1);
    array_bound_error_bucket_inited = 1;
  }
  caml_raise((value) &array_bound_error_bucket.exn);
}