/* Allocate a bigarray of length [n_pages] pages.  Since CAML_BA_MANAGED is
   set, the bigarray C finaliser will call free() once all sub-bigarrays are
   unreachable.
   Raises Out_of_memory on allocation failure; the OCaml layer can then
   trigger a full GC (which may run finalisers of unused bigarrays, freeing
   memory) and retry with [did_gc] set so we only log on the second failure. */
CAMLprim value mirage_alloc_pages(value did_gc, value n_pages)
{
  CAMLparam2(did_gc, n_pages);
  size_t len = Int_val(n_pages) * PAGE_SIZE;

  void *block = malloc(len);
  if (block == NULL) {
    if (Bool_val(did_gc))
      /* Fixed: report the call that actually failed (malloc, not memalign),
         and send the diagnostic to stderr rather than stdout. */
      fprintf(stderr, "ERROR: Io_page: malloc(%zu) failed, even after GC.\n",
              len);
    caml_raise_out_of_memory();
  }

  /* Explicitly zero the pages before returning them. */
  memset(block, 0, len);

  /* OCaml 4.02 introduced bigarray element type CAML_BA_CHAR, which needs to
     be used - otherwise type t in io_page.ml is different from the allocated
     bigarray and equality won't hold.
     Only since 4.02 there is a <caml/version.h>, thus we cannot include it in
     order to detect the version of the OCaml runtime.  Instead, we use
     definitions which were introduced by 4.02 - and cross fingers that
     they'll stay there in the future.  Once <4.02 support is removed, we
     should get rid of this hack. -- hannes, 16th Feb 2015 */
#ifdef Caml_ba_kind_val
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_CHAR | CAML_BA_C_LAYOUT | CAML_BA_MANAGED,
                                1, block, len));
#else
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT | CAML_BA_MANAGED,
                                1, block, len));
#endif
}
/* Allocate a page-aligned bigarray of length [n_pages] pages.  Since
   CAML_BA_MANAGED is set, the bigarray C finaliser will call free() once all
   sub-bigarrays are unreachable.
   Raises Out_of_memory on allocation failure; the OCaml layer can trigger a
   full GC (which may run finalisers of unused bigarrays, freeing memory) and
   retry with [did_gc] set so we only log on the second failure. */
CAMLprim value caml_alloc_pages(value did_gc, value n_pages)
{
  CAMLparam2(did_gc, n_pages);
  size_t len = Int_val(n_pages) * PAGE_SIZE;

#ifdef __MINIOS__
  void *block = _xmalloc(len, PAGE_SIZE);
  if (block == NULL) {
#elif _WIN32
  /* NB we can't use _aligned_malloc because we can't get OCaml to finalize
     with _aligned_free. Regular free() will not work. */
  static int printed_warning = 0;
  if (!printed_warning) {
    printed_warning = 1;
    printk("WARNING: Io_page on Windows doesn't guarantee alignment\n");
  }
  void *block = malloc(len);
  if (block == NULL) {
#else
  void *block = NULL;
  /* posix_memalign returns 0 on success and a POSITIVE error number on
     failure (it does not set errno, nor return a negative value).  The old
     test `ret < 0` could never fire, so a failed allocation fell through to
     memset(NULL, ...). */
  int ret = posix_memalign(&block, PAGE_SIZE, len);
  if (ret != 0) {
#endif
    if (Bool_val(did_gc))
      printk("Io_page: memalign(%d, %zu) failed, even after GC.\n",
             PAGE_SIZE, len);
    caml_raise_out_of_memory();
  }

  /* Explicitly zero the pages before returning them. */
  memset(block, 0, len);

  /* OCaml 4.02 introduced bigarray element type CAML_BA_CHAR, which needs to
     be used - otherwise type t in io_page.ml is different from the allocated
     bigarray and equality won't hold.
     Only since 4.02 there is a <caml/version.h>, thus we cannot include it in
     order to detect the version of the OCaml runtime.  Instead, we use
     definitions which were introduced by 4.02 - and cross fingers that
     they'll stay there in the future.  Once <4.02 support is removed, we
     should get rid of this hack. -- hannes, 16th Feb 2015 */
#ifdef Caml_ba_kind_val
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_CHAR | CAML_BA_C_LAYOUT | CAML_BA_MANAGED,
                                1, block, len));
#else
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT | CAML_BA_MANAGED,
                                1, block, len));
#endif
}
/* Map a single foreign grant reference [reference] from domain [domid] into
   our address space, read-write when [writable] is true.  Returns a pair
   whose two components are both the same one-page bigarray: it serves as the
   grant handle and as the Io_page.t. */
CAMLprim value stub_gnttab_map_fresh(value xgh, value reference, value domid,
                                     value writable)
{
  CAMLparam4(xgh, reference, domid, writable);
  CAMLlocal2(pair, contents);

  int prot = PROT_READ;
  if (Bool_val(writable))
    prot |= PROT_WRITE;

  void *mapping = xc_gnttab_map_grant_ref(_G(xgh), Int_val(domid),
                                          Int_val(reference), prot);
  if (mapping == NULL)
    caml_failwith("Failed to map grant ref");

  contents = caml_ba_alloc_dims(XC_GNTTAB_BIGARRAY, 1, mapping,
                                1 << XC_PAGE_SHIFT);

  pair = caml_alloc_tuple(2);
  Store_field(pair, 0, contents); /* grant_handle */
  Store_field(pair, 1, contents); /* Io_page.t */
  CAMLreturn(pair);
}
/* Map a batch of grant references in one hypercall.  [array] is a flat OCaml
   array of (domid, reference) pairs: element 2i is a domid, element 2i+1 the
   corresponding grant reference.  Returns a pair whose two components are
   both the same contiguous bigarray covering all mapped pages (it serves as
   the grant handle and as the Io_page.t). */
CAMLprim value stub_gnttab_mapv_batched(value xgh, value array, value writable)
{
  CAMLparam3(xgh, array, writable);
  /* Fixed: the old CAMLlocal4 also registered `domid` and `reference` as GC
     roots, but they were never used. */
  CAMLlocal2(contents, pair);
  int count = Wosize_val(array) / 2;
  /* NOTE(review): VLAs sized by caller-supplied [count]; presumably callers
     keep batches small enough for the stack — confirm. */
  uint32_t domids[count];
  uint32_t refs[count];
  int i;

  for (i = 0; i < count; i++) {
    domids[i] = Int_val(Field(array, i * 2 + 0));
    refs[i] = Int_val(Field(array, i * 2 + 1));
  }

  void *map = xc_gnttab_map_grant_refs(_G(xgh), count, domids, refs,
                                       Bool_val(writable)
                                         ? PROT_READ | PROT_WRITE
                                         : PROT_READ);
  if (map == NULL)
    caml_failwith("Failed to map grant ref");

  contents = caml_ba_alloc_dims(XC_GNTTAB_BIGARRAY, 1, map,
                                count << XC_PAGE_SHIFT);
  pair = caml_alloc_tuple(2);
  Store_field(pair, 0, contents); /* grant_handle */
  Store_field(pair, 1, contents); /* Io_page.t */
  CAMLreturn(pair);
}
/* Expose the first data plane of [frame] as a one-dimensional int32 bigarray
   aliasing the AVFrame's memory (no copy; no CAML_BA_MANAGED flag, so the
   OCaml GC will not free the underlying buffer).
   NOTE(review): the length passed is linesize[0] * height, which for AVFrame
   is a size in BYTES, while the element kind is CAML_BA_INT32 — the bigarray
   may report 4x too many elements; confirm against the callers' expectations.
   NOTE(review): the bigarray presumably must not outlive the AVFrame — verify
   the OCaml-side lifetime management. */
value ffmpeg_frame_buffer(value frame)
{
  CAMLparam1(frame);
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_INT32, 1, AVFrame_val(frame)->data[0],
                                AVFrame_val(frame)->linesize[0] * AVFrame_val(frame)->height));
}
/* Return the xenstore ring page (located via the start-info store MFN) as a
   one-dimensional byte bigarray.  No CAML_BA_MANAGED flag is set, so the
   OCaml GC will never attempt to free the underlying page. */
CAMLprim value caml_xenstore_start_page(value v_unit)
{
  CAMLparam1(v_unit);
  void *page = mfn_to_virt(start_info.store_mfn);
  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1,
                                page, (long)PAGE_SIZE));
}
/* libutp read callback: wrap the received bytes in an (unmanaged) bigarray
   aliasing [a->buf], hand them to the registered OCaml callback
   "utp_on_read", then tell libutp the data has been consumed. */
static uint64 on_read (utp_callback_arguments* a)
{
  CAMLparam0 ();
  CAMLlocal1 (ba);
  static value *on_read_fun = NULL;

  if (on_read_fun == NULL)
    on_read_fun = caml_named_value ("utp_on_read");

  ba = caml_ba_alloc_dims (CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1,
                           (void *) a->buf, a->len);
  caml_callback2 (*on_read_fun, Val_utp_socket (a->socket), ba);
  utp_read_drained (a->socket);

  /* Fixed: this function returns uint64, not value, so the local-roots frame
     must be popped with CAMLreturnT rather than CAMLreturn. */
  CAMLreturnT (uint64, 0);
}
/* Allocate a page-aligned bigarray covering [n_pages] pages.  Since
   CAML_BA_MANAGED is set, the bigarray C finaliser calls free() once every
   sub-bigarray is unreachable.  Fails with "memalign" if the allocation
   cannot be satisfied. */
CAMLprim value caml_alloc_pages(value n_pages)
{
  CAMLparam1(n_pages);
  size_t bytes = Int_val(n_pages) * PAGE_SIZE;

  void *pages = memalign(PAGE_SIZE, bytes);
  if (pages == NULL)
    caml_failwith("memalign");

  CAMLreturn(caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT | CAML_BA_MANAGED,
                                1, pages, bytes));
}
/* libutp sendto callback: convert the destination sockaddr to an OCaml
   address, wrap the outgoing datagram bytes in an (unmanaged) bigarray
   aliasing [a->buf], and invoke the registered OCaml callback
   "utp_on_sendto". */
static uint64 on_sendto (utp_callback_arguments *a)
{
  CAMLparam0 ();
  CAMLlocal2 (addr, buf);
  union sock_addr_union sock_addr;
  socklen_param_type sock_addr_len;
  static value *on_sendto_fun = NULL;

  if (on_sendto_fun == NULL)
    on_sendto_fun = caml_named_value ("utp_on_sendto");

  sock_addr_len = sizeof (struct sockaddr_in);
  memcpy (&sock_addr.s_inet, (struct sockaddr_in *) a->address, sock_addr_len);
  addr = alloc_sockaddr (&sock_addr, sock_addr_len, 0);
  buf = caml_ba_alloc_dims (CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1,
                            (void *) a->buf, a->len);
  caml_callback3 (*on_sendto_fun, Val_utp_context (a->context), addr, buf);

  /* Fixed: this function returns uint64, not value, so the local-roots frame
     must be popped with CAMLreturnT rather than CAMLreturn. */
  CAMLreturnT (uint64, 0);
}
/* Allocate [n_pages] physically contiguous, page-aligned pages via
   contigmalloc and wrap them in a byte bigarray tagged with
   CAML_BA_FBSD_IOPAGE so the matching finaliser can release them.
   Fails with "contigmalloc" if the allocation cannot be satisfied. */
CAMLprim value caml_alloc_pages(value n_pages)
{
  CAMLparam1(n_pages);
  CAMLlocal1(result);

  size_t pages = Int_val(n_pages);
  void *mem = (void *) __contigmalloc(PAGE_SIZE * pages, M_NOWAIT,
                                      0, 0xffffffff, PAGE_SIZE, 0ul);
  if (mem == NULL)
    caml_failwith("contigmalloc");

  result = caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT | CAML_BA_FBSD_IOPAGE,
                              1, mem, (long) PAGE_SIZE * pages);
  CAMLreturn(result);
}
/* Share [count] freshly allocated pages with domain [domid].  Returns a pair
   of (list of grant references, bigarray covering the shared pages).  The
   temporary C array of references is freed on every path. */
CAMLprim value stub_xc_gntshr_share_pages(value xgh, value domid, value count,
                                          value writeable)
{
  CAMLparam4(xgh, domid, count, writeable);
  CAMLlocal4(result, ml_refs, ml_refs_cons, ml_map);
#ifdef HAVE_GNTSHR
  void *map;
  uint32_t *refs;
  uint32_t c_domid;
  int c_count;
  int i;

  c_count = Int_val(count);
  c_domid = Int32_val(domid);
  result = caml_alloc(2, 0);

  refs = (uint32_t *) malloc(c_count * sizeof(uint32_t));
  /* Fixed: the malloc result was previously passed to
     xc_gntshr_share_pages unchecked, dereferencing NULL on OOM. */
  if (NULL == refs)
    caml_raise_out_of_memory();

  map = xc_gntshr_share_pages(_G(xgh), c_domid, c_count, refs,
                              Bool_val(writeable));
  if (NULL == map) {
    free(refs);
    failwith_xc(_G(xgh));
  }

  // Construct the list of grant references.
  ml_refs = Val_emptylist;
  for (i = c_count - 1; i >= 0; i--) {
    ml_refs_cons = caml_alloc(2, 0);
    Store_field(ml_refs_cons, 0, caml_copy_int32(refs[i]));
    Store_field(ml_refs_cons, 1, ml_refs);
    ml_refs = ml_refs_cons;
  }

  ml_map = caml_ba_alloc_dims(XC_GNTTAB_BIGARRAY, 1, map,
                              c_count << XC_PAGE_SHIFT);

  Store_field(result, 0, ml_refs);
  Store_field(result, 1, ml_map);

  free(refs);
#else
  gntshr_missing();
#endif
  CAMLreturn(result);
}
/* Build the OCaml representation of a libsmf event: a 7-tuple of
   (event_number, delta_time_pulses, time_pulses, time_seconds, track_number,
   midi_buffer bigarray, custom Event_t handle).  The bigarray aliases the
   event's midi_buffer (no CAML_BA_MANAGED flag, so the GC will not free it). */
value create_event(smf_event_t *event)
{
  CAMLparam0();
  CAMLlocal2(ret, ans);
  Event_t *et;
  int i;

  ret = caml_alloc_tuple(7);
  /* Fixed: caml_alloc_tuple does not initialise fields, yet the original
     code performed further allocations (caml_copy_double, caml_ba_alloc_dims,
     caml_alloc_custom) while later fields still held garbage — a GC triggered
     by those allocations would scan uninitialised words.  Fill every field
     with Val_unit first, then use Store_field (not raw Field assignment) for
     the real values. */
  for (i = 0; i < 7; i++)
    Field(ret, i) = Val_unit;

  Store_field(ret, 0, Val_int(event->event_number));
  Store_field(ret, 1, Val_int(event->delta_time_pulses));
  Store_field(ret, 2, Val_int(event->time_pulses));
  Store_field(ret, 3, caml_copy_double(event->time_seconds));
  Store_field(ret, 4, Val_int(event->track_number));
  /* Fixed: dropped the spurious trailing NULL argument — caml_ba_alloc_dims
     is variadic and reads exactly num_dims dimension arguments (here 1). */
  Store_field(ret, 5, caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1,
                                         (void *) event->midi_buffer,
                                         event->midi_buffer_length));

  et = malloc(sizeof(Event_t));
  /* Fixed: malloc was previously unchecked. */
  if (et == NULL)
    caml_raise_out_of_memory();
  et->t = event;

  ans = caml_alloc_custom(&event_ops, sizeof(Event_t *), 1, 0);
  Event_t_val(ans) = et;
  Store_field(ret, 6, ans);

  CAMLreturn(ret);
}
/* Wrap a Sundials dense matrix in its OCaml record representation:
   (payload bigarray view of the data, custom block holding the DlsMat
   pointer, valid flag set to true). */
CAMLprim value c_dls_dense_wrap(DlsMat a, int finalize)
{
  CAMLparam0();
  CAMLlocal3(vv, va, vr);

  /* Rough off-heap footprint, used to pace the GC via caml_alloc_final. */
  mlsize_t approx_size = a->ldim * a->N * sizeof(realtype) + 1;

  /* 2-D bigarray view of the matrix data — no copy is made. */
  va = caml_ba_alloc_dims(BIGARRAY_FLOAT, 2, a->data, a->N, a->ldim);

  /* a DlsMat is a pointer to a struct _DlsMat; attach the finaliser only
     when requested. */
  vv = caml_alloc_final(2, finalize ? &finalize_dlsmat : NULL,
                        approx_size, approx_size * 20);
  DLSMAT(vv) = a;

  vr = caml_alloc_tuple(3);
  Store_field(vr, RECORD_DLS_DENSEMATRIX_PAYLOAD, va);
  Store_field(vr, RECORD_DLS_DENSEMATRIX_DLSMAT, vv);
  Store_field(vr, RECORD_DLS_DENSEMATRIX_VALID, Val_bool(1));

  CAMLreturn(vr);
}