/* Allocate an OCaml block tagged [Double_array_tag] whose data area is
   aligned on [alignment] bytes.

   [alignment] must satisfy the posix_memalign contract: a power of two
   and a multiple of sizeof(void *).
   [len] is the requested length as an OCaml integer.

   Raises Failure on a bad alignment, Out_of_memory on ENOMEM.

   NOTE(review): one whole [alignment]-sized slot is reserved in front of
   the data to hold the OCaml header, so the header (written just below
   [bp] by [Hd_bp]) never overlaps the aligned payload. */
value caml_aligned_array_create(size_t alignment, value len)
{
  CAMLparam1 (len);
  void* bp;
  mlsize_t bosize;
  int result;

  /* One extra alignment-sized slot in front of the payload for the header. */
  bosize = (Int_val(len) + 1) * alignment;
  result = posix_memalign(&bp, alignment, bosize);
  if (result != 0) {
    if (result == EINVAL)
      caml_failwith(
        "The alignment was not a power of two, or was not a multiple of sizeof(void *)");
    else if (result == ENOMEM)
      caml_raise_out_of_memory();
    else
      caml_failwith("Unrecognized error");
  }

  /* Leave space for the header.  Arithmetic on void* is a GNU extension
     (invalid in ISO C), so step through char* explicitly. */
  bp = (char *) bp + alignment;
  Hd_bp (bp) = Make_header (Wosize_bhsize(Bhsize_bosize(bosize - alignment)),
                            Double_array_tag, Caml_white);
  CAMLreturn (Val_bp(bp));
}
/* Overwrite fields [start] through [Wosize_bp(bp) - 1] of the block at
   [bp] with the word [filler].  Fields before [start] are untouched. */
void caml_set_fields (char *bp, unsigned long start, unsigned long filler)
{
  mlsize_t idx;

  idx = start;
  while (idx < Wosize_bp (bp)){
    Field (Val_bp (bp), idx) = (value) filler;
    ++idx;
  }
}
/* cairo read callback trampoline: invoke the OCaml closure stored in
   field 0 of [*closure] with the raw buffer pointer and its length.
   On an OCaml exception, stash the exception result in field 1 for the
   caller to re-raise and report CAIRO_STATUS_READ_ERROR to cairo. */
cairo_status_t
ml_cairo_unsafe_read_func (void *closure, unsigned char *data, unsigned int length)
{
  value *cb = closure;
  value ret;

  ret = caml_callback2_exn (Field (*cb, 0), Val_bp (data), Val_int (length));
  if (!Is_exception_result (ret))
    return CAIRO_STATUS_SUCCESS;

  /* Remember the exception; cairo itself must not unwind OCaml frames. */
  Store_field (*cb, 1, ret);
  return CAIRO_STATUS_READ_ERROR;
}
/* This is a heap extension.  We have to insert it in the right place
   in the free-list.
   [caml_fl_add_block] can only be called right after a call to
   [caml_fl_allocate] that returned NULL.
   Most of the heap extensions are expected to be at the end of the
   free list.  (This depends on the implementation of [malloc].) */
void caml_fl_add_block (char *bp)
{
                                  Assert (fl_last != NULL);
                                  Assert (Next (fl_last) == NULL);
#ifdef DEBUG
  /* Fill the new block with the free-block poison pattern so that any
     stray read of it is recognisable in the debug runtime. */
  { mlsize_t i;
    for (i = 0; i < Wosize_bp (bp); i++){
      Field (Val_bp (bp), i) = Debug_free_major;
    }
  }
#endif
  caml_fl_cur_size += Whsize_bp (bp);
  if (bp > fl_last){
    /* Common case: the extension lies past the end of the free list;
       append it after the cached last element. */
    Next (fl_last) = bp;
    Next (bp) = NULL;
  }else{
    char *cur, *prev;

    /* Walk the address-ordered free list to find the insertion point. */
    prev = Fl_head;
    cur = Next (prev);
    while (cur != NULL && cur < bp){
      Assert (prev < bp || prev == Fl_head);
      prev = cur;
      cur = Next (prev);
    }
    Assert (prev < bp || prev == Fl_head);
    Assert (cur > bp || cur == NULL);
    /* Splice [bp] between [prev] and [cur]. */
    Next (bp) = cur;
    Next (prev) = bp;
    /* When inserting a block between [caml_fl_merge] and
       [caml_gc_sweep_hp], we must advance [caml_fl_merge] to the new
       block, so that [caml_fl_merge] is always the last free-list block
       before [caml_gc_sweep_hp]. */
    if (prev == caml_fl_merge && bp <= caml_gc_sweep_hp)
      caml_fl_merge = bp;
  }
}
{ Type_Int, [](Smoke::StackItem item) { return alloc_variant<DataType> (Type_Int, Val_int (item.s_int)); }, [](value val, Smoke::StackItem &item) { item.s_int = Int_val (Field (val, 0)); }, } }, { "int&", { Type_IntRef, nullptr, // XXX: memory leak! [](value val, Smoke::StackItem &item) { item.s_voidp = new int (Int_val (Field (Field (val, 0), 0))); }, } }, { "const char*", { Type_String, [](Smoke::StackItem item) { return alloc_variant<DataType> (Type_String, Val_bp (item.s_voidp)); }, [](value val, Smoke::StackItem &item) { item.s_voidp = String_val (Field (val, 0)); }, } }, { "char**", { Type_VoidP, nullptr, [](value val, Smoke::StackItem &item) { item.s_voidp = Bp_val (Field (val, 0)); }, } }, { "QObject*", { Type_ClassP, [](Smoke::StackItem item) { return alloc_variant<DataType> (Type_ClassP, Val_bp (item.s_voidp)); }, [](value val, Smoke::StackItem &item) { item.s_voidp = Bp_val (Field (val, 0)); },
/* [caml_fl_merge_block] returns the head pointer of the next block after [bp],
   because merging blocks may change the size of [bp]. */
char *caml_fl_merge_block (char *bp)
{
  char *prev, *cur, *adj;
  header_t hd = Hd_bp (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_size += Whsize_hd (hd);
#ifdef DEBUG
  /* Poison the freed block's fields in the debug runtime. */
  { mlsize_t i;
    for (i = 0; i < Wosize_hd (hd); i++){
      Field (Val_bp (bp), i) = Debug_free_major;
    }
  }
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == NULL);

  /* If [last_fragment] and [bp] are adjacent, merge them.
     The fragment becomes the head of the merged block, provided the
     resulting size still fits in a header. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_bp (bp);
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = last_fragment;
      Hd_bp (bp) = hd;
      /* The fragment's words re-enter the free-list size accounting. */
      caml_fl_cur_size += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = bp + Bosize_hd (hd);
  if (adj == Hp_bp (cur)){
    char *next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_bp (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      /* Keep the allocation cursor valid after unlinking [cur]. */
      if (fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_bp (bp) = hd;
      adj = bp + Bosize_hd (hd);
#ifdef DEBUG
      fl_last = NULL;
      Next (cur) = (char *) Debug_free_major;
      Hd_bp (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }
  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_bp (prev);
  if (prev + Bsize_wsize (prev_wosz) == Hp_bp (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    /* Grow [prev] in place; [bp]'s header is absorbed. */
    Hd_bp (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0,Caml_blue);
#ifdef DEBUG
    Hd_bp (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    /* Stand-alone free block: colour it blue and link it in. */
    Hd_bp (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = bp;
    /* A header-only fragment is not usable space; undo its accounting. */
    caml_fl_cur_size -= Whsize_wosize (0);
  }
  return adj;
}
/* Wrapping of malloc'ed C pointers in Abstract blocks. */

/* Wrap the C pointer [p] in a fresh one-word [Abstract_tag] block.
   The GC does not scan the contents of an Abstract block, so storing a
   raw pointer there is safe.  Uses the namespaced [caml_alloc_small]
   (the unprefixed [alloc_small] is a deprecated compatibility alias,
   and the rest of this file already uses caml_-prefixed calls). */
value abstract_ptr(void *p)
{
  value v = caml_alloc_small(1, Abstract_tag);
  Field(v, 0) = Val_bp(p);
  return v;
}