/* Blit [v_len] bytes from bigstring [v_src] at [v_src_pos] to [v_dst] at
   [v_dst_pos].  Exposed to OCaml as an external; always returns unit. */
CAMLprim value bin_prot_blit_buf_stub(
  value v_src_pos, value v_src, value v_dst_pos, value v_dst, value v_len)
{
  struct caml_ba_array *ba_src = Caml_ba_array_val(v_src);
  struct caml_ba_array *ba_dst = Caml_ba_array_val(v_dst);
  char *src = (char *) ba_src->data + Long_val(v_src_pos);
  char *dst = (char *) ba_dst->data + Long_val(v_dst_pos);
  size_t len = (size_t) Long_val(v_len);
  /* Release the runtime lock for large copies (> 64 KiB heuristic) or when
     either side is backed by a memory-mapped file, where a page fault
     during the copy could block for a long time. */
  if ( unlikely(len > 65536)
       || unlikely(((ba_src->flags & CAML_BA_MAPPED_FILE) != 0))
       || unlikely(((ba_dst->flags & CAML_BA_MAPPED_FILE) != 0)) )
  /* use [memmove] rather than [memcpy] because src and dst may overlap */
  {
    /* Root both bigarray values while the lock is released so a GC in
       another thread cannot collect them mid-copy (bigarray data itself
       does not move). */
    Begin_roots2(v_src, v_dst);
    caml_enter_blocking_section();
    memmove(dst, src, len);
    caml_leave_blocking_section();
    End_roots();
  }
  else
    memmove(dst, src, len);
  return Val_unit;
}
/* Windows write from a bigarray slice [buf][vofs..vofs+vlen) to [fd].
   [fd] may wrap either a winsock SOCKET or a regular HANDLE; dispatches to
   send() or WriteFile() accordingly.  Returns the number of bytes written
   as an OCaml int; raises Unix.Unix_error on failure. */
CAMLprim value lwt_unix_bytes_write(value fd, value buf, value vofs, value vlen)
{
  intnat ofs, len, written;
  DWORD numbytes, numwritten;
  DWORD err = 0;
  /* Root [buf] for the duration of the call so the bigarray header stays
     alive while its data pointer is in use. */
  Begin_root (buf);
  ofs = Long_val(vofs);
  len = Long_val(vlen);
  written = 0;
  if (len > 0) {
    numbytes = len;
    if (Descr_kind_val(fd) == KIND_SOCKET) {
      int ret;
      SOCKET s = Socket_val(fd);
      ret = send(s, (char*)Caml_ba_array_val(buf)->data + ofs, numbytes, 0);
      if (ret == SOCKET_ERROR) err = WSAGetLastError();
      numwritten = ret;
    } else {
      HANDLE h = Handle_val(fd);
      if (! WriteFile(h, (char*)Caml_ba_array_val(buf)->data + ofs, numbytes,
                      &numwritten, NULL))
        err = GetLastError();
    }
    if (err) {
      /* Map the Win32 error code onto errno, then raise Unix_error. */
      win32_maperr(err);
      uerror("write", Nothing);
    }
    written = numwritten;
  }
  End_roots();
  return Val_long(written);
}
/* SUNDIALS root-finding (event) callback trampoline.
   [user_data] points at a GC-rooted OCaml record: field 2 holds the OCaml
   event closure, field 3 the event-state record passed to it.  The state's
   bigarrays are temporarily re-pointed at SUNDIALS' own storage (yy, yp,
   gout) so the OCaml callback reads and writes it in place with no copying,
   then the original data pointers are restored.  Returns the callback's
   int result as the SUNDIALS status code. */
int sundials_ml_event_wrapper(realtype tt, N_Vector yy, N_Vector yp,
                              realtype *gout, void* user_data)
{
  value ev = Field(*(value*)user_data, 2);
  value ev_state = Field(*(value*)user_data, 3);
  /* Field 0 of the state is a boxed float: store the current time in place. */
  double* t = (double*)Field(ev_state, 0);
  *t = tt;
  /* Save the bigarrays' current data pointers so they can be restored. */
  double* old_y = Caml_ba_array_val(Field(ev_state, 1))->data;
  double* old_yp = Caml_ba_array_val(Field(ev_state, 2))->data;
  double* old_gi = Caml_ba_array_val(Field(ev_state, 3))->data;
  double* new_y = NV_DATA_S(yy);
  double* new_yp = NV_DATA_S(yp);
  /* Alias the bigarrays to SUNDIALS' buffers for the callback's duration. */
  Caml_ba_array_val(Field(ev_state, 1))->data = new_y;
  Caml_ba_array_val(Field(ev_state, 2))->data = new_yp;
  Caml_ba_array_val(Field(ev_state, 3))->data = gout;
  value ret = caml_callback(ev, ev_state);
  /* because we might have triggered a GC cycle, num_state can be invalid */
  ev_state = Field(*(value*)user_data, 3);
  Caml_ba_array_val(Field(ev_state, 1))->data = old_y;
  Caml_ba_array_val(Field(ev_state, 2))->data = old_yp;
  Caml_ba_array_val(Field(ev_state, 3))->data = old_gi;
  return Int_val (ret);
}
/* SUNDIALS residual-function callback trampoline (companion to
   sundials_ml_event_wrapper).  [user_data] points at a GC-rooted OCaml
   record: field 0 holds the residual closure, field 1 the numeric state.
   The state's bigarrays are temporarily re-pointed at SUNDIALS' buffers
   (yp, yy, rr) for the callback, then restored.  Returns the callback's
   int result as the SUNDIALS status code. */
int sundials_ml_residual_wrapper(realtype tt, N_Vector yy, N_Vector yp,
                                 N_Vector rr, void* user_data)
{
  value res = Field(*(value*)user_data, 0);
  value num_state = Field(*(value*)user_data, 1);
  /* Field 0 of the state is a boxed float: store the current time in place. */
  double* t = (double*)Field(num_state, 0);
  *t = tt;
  /* Save current data pointers; note field 1 carries yp and field 2 yy. */
  double* old_yp = Caml_ba_array_val(Field(num_state, 1))->data;
  double* old_yy = Caml_ba_array_val(Field(num_state, 2))->data;
  double* old_rr = Caml_ba_array_val(Field(num_state, 3))->data;
  double* new_yy = NV_DATA_S(yy);
  double* new_yp = NV_DATA_S(yp);
  double* new_rr = NV_DATA_S(rr);
  /* Alias the bigarrays to SUNDIALS' buffers for the callback's duration. */
  Caml_ba_array_val(Field(num_state, 1))->data = new_yp;
  Caml_ba_array_val(Field(num_state, 2))->data = new_yy;
  Caml_ba_array_val(Field(num_state, 3))->data = new_rr;
  value ret = caml_callback(res, num_state);
  /* because we might have triggered a GC cycle, num_state can be invalid */
  num_state = Field(*(value*)user_data, 1);
  Caml_ba_array_val(Field(num_state, 1))->data = old_yp;
  Caml_ba_array_val(Field(num_state, 2))->data = old_yy;
  Caml_ba_array_val(Field(num_state, 3))->data = old_rr;
  return Int_val (ret);
}
/* z := s * x element-wise over the first dimension of the bigarrays.
   [s] is an OCaml float; [x] and [z] are 1-D float64 bigarrays assumed to
   have the same length (not checked here; callers must guarantee it).
   Returns unit. */
CAMLprim value sundials_ml_fvector_scale(value s, value x, value z)
{
  CAMLparam3(s, x, z);
  const double ds = Double_val(s);
  struct caml_ba_array* ba_x = Caml_ba_array_val(x);
  struct caml_ba_array* ba_z = Caml_ba_array_val(z);
  double* dx = (double*) ba_x->data;
  double* dz = (double*) ba_z->data;
  /* BUG FIX: use intnat for the index -- dim[0] is an intnat, and a plain
     int would overflow for arrays with more than INT_MAX elements. */
  for (intnat i = 0; i < ba_x->dim[0]; i++)
    dz[i] = dx[i] * ds;
  CAMLreturn(Val_unit);
}
/* Three-way comparison of two bigstring slices of length [v_len] starting
   at [v_s1_pos] / [v_s2_pos].  Returns exactly -1, 0 or 1 (as OCaml ints)
   matching the sign of memcmp. */
CAMLprim value bigstring_memcmp_stub(value v_s1, value v_s1_pos,
                                     value v_s2, value v_s2_pos,
                                     value v_len) /* noalloc */
{
  const char *p1 =
    (const char *) Caml_ba_array_val(v_s1)->data + Long_val(v_s1_pos);
  const char *p2 =
    (const char *) Caml_ba_array_val(v_s2)->data + Long_val(v_s2_pos);
  int cmp = memcmp(p1, p2, Long_val(v_len));
  /* Collapse memcmp's arbitrary-magnitude result to its sign. */
  return Val_int((cmp > 0) - (cmp < 0));
}
/* Return a fresh bigarray header viewing [vb]'s data with a new element
   kind and first-dimension length [vlen].  The data is shared, not copied;
   caml_ba_update_proxy links the two headers so the underlying buffer is
   freed exactly once. */
value caml_ba_change_flags(value vb, value vkind, value vlen)
{
  CAMLparam3 (vb, vkind, vlen);
  CAMLlocal1 (res);
#define b ((struct caml_ba_array *) Caml_ba_array_val(vb))
  /* Force C layout and managed storage; only the kind comes from caller. */
  int flags = Int_val(vkind) | CAML_BA_C_LAYOUT | CAML_BA_MANAGED;
  intnat len = Long_val(vlen);
  /* Allocate a header over [b]'s data, then patch the first dimension.
     NOTE(review): [len] is not validated against the byte size of the
     original buffer -- TODO confirm callers guarantee it fits. */
  res = caml_ba_alloc(flags, b->num_dims, b->data, b->dim);
  Caml_ba_array_val(res)->dim[0] = len;
  caml_ba_update_proxy(b, Caml_ba_array_val(res));
  CAMLreturn (res);
#undef b
}
/* Copy the whole bigarray [_buf] into a new GstBuffer and push it to the
   appsrc element wrapped by [_as].  Raises the registered
   "gstreamer_exn_failure" exception if mapping or pushing fails.
   Returns unit. */
CAMLprim value ocaml_gstreamer_appsrc_push_buffer_data(value _as, value _buf)
{
  CAMLparam2(_as, _buf);
  int buflen = Caml_ba_array_val(_buf)->dim[0];
  appsrc *as = Appsrc_val(_as);
  GstBuffer *gstbuf;
  GstMapInfo map;
  GstFlowReturn ret;
  gboolean bret;

  /* GStreamer calls may block; drop the OCaml runtime lock around them. */
  caml_release_runtime_system();
  gstbuf = gst_buffer_new_and_alloc(buflen);
  bret = gst_buffer_map(gstbuf, &map, GST_MAP_WRITE);
  caml_acquire_runtime_system();

  if (!bret) {
    /* BUG FIX: release the freshly-allocated buffer before raising,
       otherwise it leaks on every failed map. */
    gst_buffer_unref(gstbuf);
    caml_raise_constant(*caml_named_value("gstreamer_exn_failure"));
  }

  /* Copy while holding the runtime lock: _buf is rooted and its data
     pointer stays valid. */
  memcpy(map.data, (unsigned char*)Caml_ba_data_val(_buf), buflen);

  caml_release_runtime_system();
  gst_buffer_unmap(gstbuf, &map);
  /* gst_app_src_push_buffer takes ownership of gstbuf even on error. */
  ret = gst_app_src_push_buffer(as->appsrc, gstbuf);
  caml_acquire_runtime_system();

  if (ret != GST_FLOW_OK)
    caml_raise_constant(*caml_named_value("gstreamer_exn_failure"));

  CAMLreturn(Val_unit);
}
/* Create a GstBuffer containing [_len] bytes copied from bigarray [_ba]
   starting at offset [_off].  Raises the registered
   "gstreamer_exn_failure" exception if the buffer cannot be mapped. */
CAMLprim value ocaml_gstreamer_buffer_of_data(value _ba, value _off, value _len)
{
  CAMLparam1(_ba);
  int bufoff = Int_val(_off);
  int buflen = Int_val(_len);
  GstBuffer *gstbuf;
  GstMapInfo map;
  gboolean bret;

  /* The requested slice must lie inside the bigarray. */
  assert(buflen+bufoff <= Caml_ba_array_val(_ba)->dim[0]);

  caml_release_runtime_system();
  gstbuf = gst_buffer_new_and_alloc(buflen);
  bret = gst_buffer_map(gstbuf, &map, GST_MAP_WRITE);
  caml_acquire_runtime_system();

  if (!bret) {
    /* BUG FIX: drop our reference to the freshly-allocated buffer before
       raising, otherwise it leaks on every failed map. */
    gst_buffer_unref(gstbuf);
    caml_raise_constant(*caml_named_value("gstreamer_exn_failure"));
  }

  memcpy(map.data, (unsigned char*)Caml_ba_data_val(_ba)+bufoff, buflen);

  caml_release_runtime_system();
  gst_buffer_unmap(gstbuf, &map);
  caml_acquire_runtime_system();

  CAMLreturn(value_of_buffer(gstbuf));
}
/* recvfrom(2) into bigstring [v_bstr] at [v_pos], up to [v_len] bytes.
   Releases the runtime lock only for reads larger than THREAD_IO_CUTOFF.
   Returns the pair (bytes_read, sender_sockaddr); raises Unix.Unix_error
   on failure. */
CAMLprim value bigstring_recvfrom_assume_fd_is_nonblocking_stub(
  value v_sock, value v_pos, value v_len, value v_bstr)
{
  CAMLparam1(v_bstr);
  CAMLlocal1(v_addr);
  struct caml_ba_array *ba = Caml_ba_array_val(v_bstr);
  char *bstr = (char *) ba->data + Long_val(v_pos);
  size_t len = Long_val(v_len);
  ssize_t n_read;
  union sock_addr_union addr;
  socklen_param_type addr_len = sizeof(addr);
  /* Deliberately a bare [value], not CAMLlocal: it is allocated and fully
     initialized below with no allocation point before the return. */
  value v_res;
  if (len > THREAD_IO_CUTOFF) {
    caml_enter_blocking_section();
    n_read = recvfrom(Int_val(v_sock), bstr, len, 0, &addr.s_gen, &addr_len);
    caml_leave_blocking_section();
  }
  else
    n_read = recvfrom(Int_val(v_sock), bstr, len, 0, &addr.s_gen, &addr_len);
  if (n_read == -1)
    uerror("bigstring_recvfrom_assume_fd_is_nonblocking", Nothing);
  v_addr = alloc_sockaddr(&addr, addr_len, -1);
  /* caml_alloc_small does not initialize fields: both are set immediately,
     before any further allocation can run the GC. */
  v_res = caml_alloc_small(2, 0);
  Field(v_res, 0) = Val_long(n_read);
  Field(v_res, 1) = v_addr;
  CAMLreturn(v_res);
}
/* Bind the OCaml context record [ida_ctxt] to the IDA solver instance and
   run IDAInit (plus IDARootInit when the context declares event functions).
   Returns IDAInit's status code as an OCaml int. */
CAMLprim value sundials_ml_ida_init(value ida_solver, value ida_ctxt)
{
  CAMLparam2(ida_solver, ida_ctxt);
  /* Sanity-check the expected shape of the context record: field 0 must be
     a closure, field 1 a record whose own field 0 is a boxed float. */
  assert (Tag_val(ida_ctxt) == 0);
  assert (Tag_val(Field(ida_ctxt, 0)) == Closure_tag);
  assert (Tag_val(Field(ida_ctxt, 1)) == 0 );
  assert (Tag_val(Field(Field(ida_ctxt, 1), 0)) == Double_tag );
  /* Store the context in the solver struct and register it as a global GC
     root so the C callbacks can safely dereference it later. */
  IDA_CTXT(ida_solver) = ida_ctxt;
  caml_register_global_root(&IDA_CTXT(ida_solver));
  const realtype rt_t0 = Double_val(NUMSTATE_T0(ida_solver));
  value y0 = NUMSTATE_YY(ida_solver);
  value yp0 = NUMSTATE_YP(ida_solver);
  /* Wrap the bigarray payloads as stack-allocated N_Vectors (macros). */
  BA_STACK_NVECTOR(y0, nv_y0);
  BA_STACK_NVECTOR(yp0, nv_yp0);
  value gi = Field(EVENTSTATE(ida_solver), 3);
  const intnat ev_len = Caml_ba_array_val(gi)->dim[0];
  const int ret = IDAInit(IDA_MEM(ida_solver), &sundials_ml_residual_wrapper,
                          rt_t0, &nv_y0, &nv_yp0);
  if (ev_len > 0) {
    /* NOTE(review): IDARootInit's return status is ignored here -- confirm
       this is intentional. */
    IDARootInit(IDA_MEM(ida_solver), ev_len, sundials_ml_event_wrapper);
  }
  CAMLreturn(Val_int(ret));
}
/* Synchronously read [num] bytes from block-device sector [sector] into
   bigarray [buffer] via solo5.  Returns true on success, false on failure
   (after logging). */
CAMLprim value stub_blk_read(value sector, value buffer, value num)
{
  CAMLparam3(sector, buffer, num);
  uint64_t sec = Int64_val(sector);
  uint8_t *data = Caml_ba_data_val(buffer);
  int n = Int_val(num);
  int ret = 0;

  assert(Caml_ba_array_val(buffer)->num_dims == 1);

  //printf("Solo5 blk read: sec=%d num=%d\n", sec, n);
  ret = solo5_blk_read_sync(sec, data, &n);
  if ( ret )
    /* BUG FIX: [sec] is uint64_t -- printing it with %d was undefined
       behavior.  Cast to unsigned long long and use %llu. */
    printf("virtio read failed... %d from sector=%llu\n", n,
           (unsigned long long) sec);

#if 0
  {
    int i;
    for (i = 0; i < n; i++) {
      printf("%02x ", (uint8_t) data[i]);
      if ( i % 16 == 15 ) printf("\n");
    }
    printf("\n");
  }
#endif

  CAMLreturn(Val_bool(!ret));
}
/* Custom-block finalizer for bigarrays: releases the underlying storage
   according to how it was allocated.  Proxied (shared) data is
   reference-counted and freed only when the last view dies.  Managed kinds
   outside the three cases below are deliberately left untouched. */
static void caml_ba_finalize(value v)
{
  struct caml_ba_array * b = Caml_ba_array_val(v);
  switch (b->flags & CAML_BA_MANAGED_MASK) {
  case CAML_BA_EXTERNAL:
    /* Memory is owned by outside code; nothing to free. */
    break;
  case CAML_BA_MANAGED:
    if (b->proxy == NULL) {
      free(b->data);
    } else {
      /* Data shared with other views through a proxy. */
      if (-- b->proxy->refcount == 0) {
        free(b->proxy->data);
        caml_stat_free(b->proxy);
      }
    }
    break;
  case CAML_BA_MAPPED_FILE:
    if (b->proxy == NULL) {
      caml_ba_unmap_file(b->data, caml_ba_byte_size(b));
    } else {
      if (-- b->proxy->refcount == 0) {
        caml_ba_unmap_file(b->proxy->data, b->proxy->size);
        caml_stat_free(b->proxy);
      }
    }
    break;
  }
}
/* Adapted from sundials-2.5.0/src/nvec_par/nvector_parallel.c:
   N_VCloneEmpty_Parallel */
/* Clone the structure (not the contents) of parallel N_Vector [w]:
   allocate a fresh bigarray with the same flags/shape for the payload,
   copy the ops table and the length/communicator metadata, and point the
   new content's data at the new bigarray.  Returns NULL if [w] is NULL or
   allocation fails. */
static N_Vector clone_parallel(N_Vector w)
{
  CAMLparam0();
  CAMLlocal2(v_payload, w_payload);
  N_Vector v;
  N_VectorContent_Parallel content;
  if (w == NULL) CAMLreturnT (N_Vector, NULL);
  w_payload = NVEC_BACKLINK(w);
  struct caml_ba_array *w_ba = Caml_ba_array_val(Field(w_payload, 0));
  /* Create vector (we need not copy the data) */
  v_payload = caml_alloc_tuple(3);
  /* Passing NULL data makes caml_ba_alloc allocate fresh storage. */
  Store_field(v_payload, 0,
              caml_ba_alloc(w_ba->flags, w_ba->num_dims, NULL, w_ba->dim));
  Store_field(v_payload, 1, Field(w_payload, 1));
  Store_field(v_payload, 2, Field(w_payload, 2));
  v = sunml_alloc_cnvec(sizeof(struct _N_VectorContent_Parallel), v_payload);
  if (v == NULL) CAMLreturnT (N_Vector, NULL);
  content = (N_VectorContent_Parallel) v->content;
  /* Create vector operation structure */
  sunml_clone_cnvec_ops(v, w);
  /* Attach lengths and communicator */
  content->local_length = NV_LOCLENGTH_P(w);
  content->global_length = NV_GLOBLENGTH_P(w);
  content->comm = NV_COMM_P(w);
  /* own_data = 0: the bigarray (GC-managed) owns the storage, not SUNDIALS. */
  content->own_data = 0;
  content->data = Caml_ba_data_val(Field(v_payload, 0));
  CAMLreturnT(N_Vector, v);
}
/* Bigarray.dim: return dimension number [vn] (0-based) of bigarray [vb].
   Raises Invalid_argument when the index is out of range. */
CAMLprim value caml_ba_dim(value vb, value vn)
{
  struct caml_ba_array * b = Caml_ba_array_val(vb);
  intnat n = Long_val(vn);
  /* BUG FIX: also reject negative indices -- [n] is signed, so the original
     [n >= num_dims] test alone let n < 0 read out of bounds. */
  if (n < 0 || n >= b->num_dims) caml_invalid_argument("Bigarray.dim");
  return Val_long(b->dim[n]);
}
/* Bigarray.reshape: return a view of [vb]'s data with the dimensions given
   in the OCaml array [vdim].  The total element count must match; the data
   is shared (proxy-linked), not copied.  Raises Invalid_argument on a bad
   dimension count, a negative dimension, or a size mismatch. */
CAMLprim value caml_ba_reshape(value vb, value vdim)
{
  CAMLparam2 (vb, vdim);
  CAMLlocal1 (res);
#define b ((struct caml_ba_array *) Caml_ba_array_val(vb))
  intnat dim[CAML_BA_MAX_NUM_DIMS];
  mlsize_t num_dims;
  uintnat num_elts;
  int overflow;
  mlsize_t i;

  num_dims = Wosize_val(vdim);
  if (num_dims < 1 || num_dims > CAML_BA_MAX_NUM_DIMS)
    caml_invalid_argument("Bigarray.reshape: bad number of dimensions");
  /* BUG FIX: compute the element product with overflow detection
     (caml_ba_multov) -- a plain multiply could wrap around and make a
     too-large reshape pass the size check below. */
  overflow = 0;
  num_elts = 1;
  for (i = 0; i < num_dims; i++) {
    dim[i] = Long_val(Field(vdim, i));
    if (dim[i] < 0)
      caml_invalid_argument("Bigarray.reshape: negative dimension");
    num_elts = caml_ba_multov(num_elts, dim[i], &overflow);
  }
  /* Check that sizes agree */
  if (overflow || num_elts != caml_ba_num_elts(b))
    caml_invalid_argument("Bigarray.reshape: size mismatch");
  /* Create bigarray with same data and new dimensions */
  res = caml_ba_alloc(b->flags, num_dims, b->data, dim);
  /* Create or update proxy in case of managed bigarray */
  caml_ba_update_proxy(b, Caml_ba_array_val(res));
  /* Return result */
  CAMLreturn (res);
#undef b
}
/* Round [v_iopage]'s data pointer down to its containing page boundary;
   the grant API takes page-aligned addresses. */
static void * base_page_of(value v_iopage)
{
  struct caml_ba_array *ba = Caml_ba_array_val(v_iopage);
  unsigned long addr = (unsigned long) ba->data;
  addr &= ~(PAGE_SIZE - 1);
  return (void *) addr;
}
/* Write the contents of bigarray [buffer_v] as a new HDF5 dataset named
   [dset_name_v] under location [loc_id_v], with element type [type_id_v].
   Raises (via raise_if_fail) when the HDF5 call reports an error. */
void hdf5_h5lt_make_dataset(value loc_id_v, value dset_name_v, value type_id_v,
                            value buffer_v)
{
  CAMLparam4(loc_id_v, dset_name_v, type_id_v, buffer_v);
  struct caml_ba_array *buffer = Caml_ba_array_val(buffer_v);
  /* BUG FIX: bigarray dims are intnat while HDF5 expects hsize_t; the two
     types are not guaranteed to have the same size or signedness, so the
     original pointer cast was unsafe.  Copy into a properly typed array. */
  hsize_t dims[CAML_BA_MAX_NUM_DIMS];
  int i;
  for (i = 0; i < buffer->num_dims; i++)
    dims[i] = (hsize_t) buffer->dim[i];
  raise_if_fail(H5LTmake_dataset(Hid_val(loc_id_v), String_val(dset_name_v),
                                 buffer->num_dims, dims,
                                 Hid_val(type_id_v), buffer->data));
  CAMLreturn0;
}
/* Return 1 if any of the first [n] iovecs in [v_iovecs] wraps a bigarray
   backed by a memory-mapped file, 0 otherwise. */
__pure static inline int contains_mmapped(value v_iovecs, int n)
{
  int i;
  for (i = 0; i < n; i++) {
    value v_iovec = Field(v_iovecs, i);
    if (unlikely(Caml_ba_array_val(Field(v_iovec, 0))->flags
                 & CAML_BA_MAPPED_FILE))
      return 1;
  }
  return 0;
}
/* Custom-block serializer for bigarrays (Marshal support).  Writes the
   header (num_dims, kind+layout flags, each dimension as a 32-bit int),
   then the elements in a width-appropriate block format, and finally
   reports the in-heap custom-block size for 32- and 64-bit readers. */
static void caml_ba_serialize(value v, uintnat * wsize_32, uintnat * wsize_64)
{
  struct caml_ba_array * b = Caml_ba_array_val(v);
  intnat num_elts;
  int i;
  /* Serialize header information */
  caml_serialize_int_4(b->num_dims);
  caml_serialize_int_4(b->flags & (CAML_BA_KIND_MASK | CAML_BA_LAYOUT_MASK));
  /* On a 64-bit machine, if any of the dimensions is >= 2^32, the size of
     the marshaled data will be >= 2^32 and extern_value() will fail.  So,
     it is safe to write the dimensions as 32-bit unsigned integers. */
  for (i = 0; i < b->num_dims; i++) caml_serialize_int_4(b->dim[i]);
  /* Compute total number of elements */
  num_elts = 1;
  for (i = 0; i < b->num_dims; i++) num_elts = num_elts * b->dim[i];
  /* Serialize elements */
  switch (b->flags & CAML_BA_KIND_MASK) {
  case CAML_BA_SINT8:
  case CAML_BA_UINT8:
    caml_serialize_block_1(b->data, num_elts); break;
  case CAML_BA_SINT16:
  case CAML_BA_UINT16:
    caml_serialize_block_2(b->data, num_elts); break;
  /* Float/complex kinds are compiled out of kernel builds. */
#ifdef _KERNEL
#else
  case CAML_BA_FLOAT32:
#endif
  case CAML_BA_INT32:
    caml_serialize_block_4(b->data, num_elts); break;
#ifdef _KERNEL
#else
  case CAML_BA_COMPLEX32:
    /* Two 32-bit components per complex element. */
    caml_serialize_block_4(b->data, num_elts * 2); break;
  case CAML_BA_FLOAT64:
  case CAML_BA_INT64:
    caml_serialize_block_8(b->data, num_elts); break;
  case CAML_BA_COMPLEX64:
    caml_serialize_block_8(b->data, num_elts * 2); break;
#endif
  case CAML_BA_CAML_INT:
    /* Tagged OCaml ints: range-checked, stored compactly. */
    caml_ba_serialize_longarray(b->data, num_elts, -0x40000000, 0x3FFFFFFF);
    break;
  case CAML_BA_NATIVE_INT:
    caml_ba_serialize_longarray(b->data, num_elts, -0x80000000, 0x7FFFFFFF);
    break;
  }
  /* Compute required size in Caml heap.  Assumes struct caml_ba_array
     is exactly 4 + num_dims words */
  Assert(sizeof(struct caml_ba_array) == 5 * sizeof(value));
  *wsize_32 = (4 + b->num_dims) * 4;
  *wsize_64 = (4 + b->num_dims) * 8;
}
/* Blit [v_len] bytes between two bigstrings, releasing the OCaml runtime
   lock for copies larger than THREAD_IO_CUTOFF.  Uses memmove because the
   regions may overlap.  Returns unit. */
CAMLprim value bigstring_blit_stub(
  value v_src, value v_src_pos, value v_dst, value v_dst_pos, value v_len)
{
  struct caml_ba_array *ba_src = Caml_ba_array_val(v_src);
  struct caml_ba_array *ba_dst = Caml_ba_array_val(v_dst);
  char *src = (char *) ba_src->data + Long_val(v_src_pos);
  char *dst = (char *) ba_dst->data + Long_val(v_dst_pos);
  size_t len = Long_val(v_len);
  if (len > THREAD_IO_CUTOFF) {
    /* Root both bigarrays while the lock is released so a GC in another
       thread cannot collect them mid-copy. */
    Begin_roots2(v_src, v_dst);
    caml_enter_blocking_section();
    /* FIX: use the precomputed [len] instead of re-reading Long_val(v_len)
       -- consistent with bin_prot_blit_buf_stub in this file. */
    memmove(dst, src, len);
    caml_leave_blocking_section();
    End_roots();
  }
  else
    memmove(dst, src, len);
  return Val_unit;
}
/* Shared implementation of Bigarray.set for all arities: [vind] holds the
   [nind] index values.  Computes the flat offset (with bounds checks in
   caml_ba_offset), then stores [newval] with the representation matching
   the array's element kind.  Returns unit. */
static value caml_ba_set_aux(value vb, value * vind, intnat nind, value newval)
{
  struct caml_ba_array * b = Caml_ba_array_val(vb);
  intnat index[CAML_BA_MAX_NUM_DIMS];
  int i;
  intnat offset;
  /* Check number of indices = number of dimensions of array
     (maybe not necessary if ML typing guarantees this) */
  if (nind != b->num_dims)
    caml_invalid_argument("Bigarray.set: wrong number of indices");
  /* Compute offset and check bounds */
  for (i = 0; i < b->num_dims; i++) index[i] = Long_val(vind[i]);
  offset = caml_ba_offset(b, index);
  /* Perform write */
  switch (b->flags & CAML_BA_KIND_MASK) {
  default:
    Assert(0);
  /* Float kinds are compiled out of kernel builds. */
#ifdef _KERNEL
#else
  case CAML_BA_FLOAT32:
    ((float *) b->data)[offset] = Double_val(newval); break;
  case CAML_BA_FLOAT64:
    ((double *) b->data)[offset] = Double_val(newval); break;
#endif
  case CAML_BA_SINT8:
  case CAML_BA_UINT8:
    ((int8 *) b->data)[offset] = Int_val(newval); break;
  case CAML_BA_SINT16:
  case CAML_BA_UINT16:
    ((int16 *) b->data)[offset] = Int_val(newval); break;
  case CAML_BA_INT32:
    ((int32 *) b->data)[offset] = Int32_val(newval); break;
  case CAML_BA_INT64:
    ((int64 *) b->data)[offset] = Int64_val(newval); break;
  case CAML_BA_NATIVE_INT:
    ((intnat *) b->data)[offset] = Nativeint_val(newval); break;
  case CAML_BA_CAML_INT:
    ((intnat *) b->data)[offset] = Long_val(newval); break;
#ifdef _KERNEL
#else
  case CAML_BA_COMPLEX32:
    { /* Complex values are stored as (re, im) pairs. */
      float * p = ((float *) b->data) + offset * 2;
      p[0] = Double_field(newval, 0);
      p[1] = Double_field(newval, 1);
      break; }
  case CAML_BA_COMPLEX64:
    { double * p = ((double *) b->data) + offset * 2;
      p[0] = Double_field(newval, 0);
      p[1] = Double_field(newval, 1);
      break; }
#endif
  }
  return Val_unit;
}
/* Atomically AND the byte at index [idx] of bigarray [buf] with [val];
   return the previous byte value as an OCaml int.  Raises Invalid_argument
   when [idx] is out of bounds. */
CAMLprim value stub_atomic_fetch_and_uint8(value buf, value idx, value val)
{
  CAMLparam3(buf, idx, val);
  intnat i = Long_val(idx);
  /* BUG FIX: validate the index -- including negative values, which the
     original never rejected -- BEFORE forming the pointer, so no
     out-of-bounds pointer arithmetic ever happens. */
  if (i < 0 || i >= Caml_ba_array_val(buf)->dim[0])
    caml_invalid_argument("idx");
  uint8_t c_val = (uint8_t)Int_val(val);
  uint8_t *ptr = (uint8_t *)Caml_ba_data_val(buf) + i;
  CAMLreturn(Val_int((uint8_t)__sync_fetch_and_and(ptr, c_val)));
}
/* Bigarray.sub: return a sub-array of [vb] of length [vlen] starting at
   [vofs] along the major dimension (first for C layout, last for Fortran
   layout).  The data is shared with the original (proxy-linked), not
   copied.  Raises Invalid_argument on out-of-range offset/length. */
CAMLprim value caml_ba_sub(value vb, value vofs, value vlen)
{
  CAMLparam3 (vb, vofs, vlen);
  CAMLlocal1 (res);
#define b ((struct caml_ba_array *) Caml_ba_array_val(vb))
  intnat ofs = Long_val(vofs);
  intnat len = Long_val(vlen);
  int i, changed_dim;
  intnat mul;
  char * sub_data;
  /* Compute offset and check bounds */
  if ((b->flags & CAML_BA_LAYOUT_MASK) == CAML_BA_C_LAYOUT) {
    /* We reduce the first dimension */
    mul = 1;
    for (i = 1; i < b->num_dims; i++) mul *= b->dim[i];
    changed_dim = 0;
  } else {
    /* We reduce the last dimension */
    mul = 1;
    for (i = 0; i < b->num_dims - 1; i++) mul *= b->dim[i];
    changed_dim = b->num_dims - 1;
    ofs--;  /* Fortran arrays start at 1 */
  }
  if (ofs < 0 || len < 0 || ofs + len > b->dim[changed_dim])
    caml_invalid_argument("Bigarray.sub: bad sub-array");
  /* [mul] is the number of elements per slice along the changed
     dimension, so this lands on the first element of the sub-array. */
  sub_data =
    (char *) b->data
    + ofs * mul * caml_ba_element_size[b->flags & CAML_BA_KIND_MASK];
  /* Allocate an OCaml bigarray to hold the result */
  res = caml_ba_alloc(b->flags, b->num_dims, sub_data, b->dim);
  /* Doctor the changed dimension */
  Caml_ba_array_val(res)->dim[changed_dim] = len;
  /* Create or update proxy in case of managed bigarray */
  caml_ba_update_proxy(b, Caml_ba_array_val(res));
  /* Return result */
  CAMLreturn (res);
#undef b
}
/* Shared implementation of Bigarray.get for all arities: [vind] holds the
   [nind] index values.  Computes the flat offset (bounds-checked inside
   caml_ba_offset) and boxes the element according to the array's kind. */
value caml_ba_get_N(value vb, value * vind, int nind)
{
  struct caml_ba_array * b = Caml_ba_array_val(vb);
  intnat index[CAML_BA_MAX_NUM_DIMS];
  int i;
  intnat offset;
  /* Check number of indices = number of dimensions of array
     (maybe not necessary if ML typing guarantees this) */
  if (nind != b->num_dims)
    caml_invalid_argument("Bigarray.get: wrong number of indices");
  /* Compute offset and check bounds */
  for (i = 0; i < b->num_dims; i++) index[i] = Long_val(vind[i]);
  offset = caml_ba_offset(b, index);
  /* Perform read */
  switch ((b->flags) & CAML_BA_KIND_MASK) {
  default:
    Assert(0);
  /* Float kinds are compiled out of kernel builds. */
#ifdef _KERNEL
#else
  case CAML_BA_FLOAT32:
    return caml_copy_double(((float *) b->data)[offset]);
  case CAML_BA_FLOAT64:
    return caml_copy_double(((double *) b->data)[offset]);
#endif
  case CAML_BA_SINT8:
    return Val_int(((int8 *) b->data)[offset]);
  case CAML_BA_UINT8:
    return Val_int(((uint8 *) b->data)[offset]);
  case CAML_BA_SINT16:
    return Val_int(((int16 *) b->data)[offset]);
  case CAML_BA_UINT16:
    return Val_int(((uint16 *) b->data)[offset]);
  case CAML_BA_INT32:
    return caml_copy_int32(((int32 *) b->data)[offset]);
  case CAML_BA_INT64:
    return caml_copy_int64(((int64 *) b->data)[offset]);
  case CAML_BA_NATIVE_INT:
    return caml_copy_nativeint(((intnat *) b->data)[offset]);
  case CAML_BA_CAML_INT:
    /* Tagged int: no boxing needed. */
    return Val_long(((intnat *) b->data)[offset]);
#ifdef _KERNEL
#else
  case CAML_BA_COMPLEX32:
    { /* Complex values are stored as (re, im) pairs. */
      float * p = ((float *) b->data) + offset * 2;
      return copy_two_doubles(p[0], p[1]); }
  case CAML_BA_COMPLEX64:
    { double * p = ((double *) b->data) + offset * 2;
      return copy_two_doubles(p[0], p[1]); }
#endif
  }
}
/* Unmap a granted region previously mapped into bigarray [array]; the page
   count is derived from the bigarray's byte length.  Raises Failure when
   the hypercall reports an error. */
CAMLprim value stub_gnttab_unmap(value xgh, value array)
{
  CAMLparam2(xgh, array);
  int nbytes = Caml_ba_array_val(array)->dim[0];
  int npages = nbytes >> XC_PAGE_SHIFT;
  if (xc_gnttab_munmap(_G(xgh), Caml_ba_data_val(array), npages) != 0)
    caml_failwith("Failed to unmap grant");
  CAMLreturn(Val_unit);
}
/* Bigarray.blit: copy the contents of [vsrc] into [vdst].  Both arrays
   must have identical dimensions; overlapping data is handled (memmove).
   Raises Invalid_argument on a shape mismatch.  Returns unit. */
CAMLprim value caml_ba_blit(value vsrc, value vdst)
{
  struct caml_ba_array * src = Caml_ba_array_val(vsrc);
  struct caml_ba_array * dst = Caml_ba_array_val(vdst);
  int i;
  int same_shape = (src->num_dims == dst->num_dims);

  /* Require identical rank and identical extent in every dimension. */
  for (i = 0; same_shape && i < src->num_dims; i++)
    same_shape = (src->dim[i] == dst->dim[i]);
  if (!same_shape)
    caml_invalid_argument("Bigarray.blit: dimension mismatch");

  {
    intnat num_bytes =
      caml_ba_num_elts(src)
      * caml_ba_element_size[src->flags & CAML_BA_KIND_MASK];
    memmove(dst->data, src->data, num_bytes);
  }
  return Val_unit;
}
/* Sort the first [l] elements of the paired bigarrays [va]/[vb] in tandem:
   insertion sort below the cutoff, otherwise a top-down merge sort using a
   half-size auxiliary buffer.  Returns unit. */
CAMLprim value sort_bigarrays (value va, value vb, value l)
{
  struct caml_ba_array * a0 = Caml_ba_array_val(va);
  struct caml_ba_array * b0 = Caml_ba_array_val(vb);
  int len = Long_val (l);
  assert (a0->dim[0] >= len);
  assert (b0->dim[0] >= len);
  intnat * a1 = (intnat *) a0->data;
  intnat * b1 = (intnat *) b0->data;
  if (len <= cuttoff)
    isort (a1, b1, 0, len, a1, b1, 0, len);
  else {
    int len1 = len / 2;
    int len2 = len - len1;
    intnat * a2 = (intnat *) malloc (len2 * sizeof (intnat));
    intnat * b2 = (intnat *) malloc (len2 * sizeof (intnat));
    /* BUG FIX: the original never checked the allocations; a NULL here
       would crash inside sort_rec.  free(NULL) is a no-op, so releasing
       both unconditionally is safe. */
    if (a2 == NULL || b2 == NULL) {
      free (a2);
      free (b2);
      caml_raise_out_of_memory ();
    }
    sort_rec (a1, b1, len1, len, a2, b2, 0, len2);
    sort_rec (a1, b1, 0, len1, a1, b1, len2, len);
    merge (a1, b1, len2, len, a2, b2, 0, len2, a1, b1, 0, len);
    free (a2);
    free (b2);
  }
  return Val_unit;
}
/* [caml_ba_alloc] will allocate a new bigarray object in the heap.
   If [data] is NULL, the memory for the contents is also allocated
   (with [malloc]) by [caml_ba_alloc].
   [data] cannot point into the OCaml heap.
   [dim] may point into an object in the OCaml heap. */
/* NOTE(review): this chunk of the file is truncated -- the function body
   continues past the visible region (the FreeBSD-kernel proxy branch is
   cut off mid-way). */
CAMLexport value
caml_ba_alloc(int flags, int num_dims, void * data, intnat * dim)
{
  uintnat num_elts, asize, size;
  int overflow, i;
  value res;
  struct caml_ba_array * b;
  intnat dimcopy[CAML_BA_MAX_NUM_DIMS];
#if defined(__FreeBSD__) && defined(_KERNEL)
  struct caml_ba_proxy *proxy;
#endif
  Assert(num_dims >= 1 && num_dims <= CAML_BA_MAX_NUM_DIMS);
  Assert((flags & CAML_BA_KIND_MASK) <= CAML_BA_COMPLEX64);
  /* Copy the dims locally: [dim] may point into the OCaml heap and the
     allocation below can trigger a GC. */
  for (i = 0; i < num_dims; i++) dimcopy[i] = dim[i];
  size = 0;
  if (data == NULL) {
    /* Compute the byte size with overflow detection, then allocate. */
    overflow = 0;
    num_elts = 1;
    for (i = 0; i < num_dims; i++) {
      num_elts = caml_ba_multov(num_elts, dimcopy[i], &overflow);
    }
    size = caml_ba_multov(num_elts,
                          caml_ba_element_size[flags & CAML_BA_KIND_MASK],
                          &overflow);
    if (overflow) caml_raise_out_of_memory();
    data = __malloc(size);
    if (data == NULL && size != 0) caml_raise_out_of_memory();
    flags |= CAML_BA_MANAGED;
  }
  asize = SIZEOF_BA_ARRAY + num_dims * sizeof(intnat);
  res = caml_alloc_custom(&caml_ba_ops, asize, size, CAML_BA_MAX_MEMORY);
  b = Caml_ba_array_val(res);
#if defined(__FreeBSD__) && defined(_KERNEL)
  if ((flags & CAML_BA_MANAGED_MASK) != CAML_BA_MANAGED) {
    /* Kernel build: wrap externally-owned data in a refcounted proxy. */
    b->proxy = __malloc(sizeof(struct caml_ba_proxy));
    if (b->proxy == NULL) caml_raise_out_of_memory();
    proxy = b->proxy;
    /* NOTE(review): proxy->size is the sum of the dims, not the byte
       size -- TODO confirm this is what the unmap path expects. */
    for (proxy->size = 0, i = 0; i < num_dims; i++) proxy->size += dim[i];
    proxy->refcount = 1;
    if ((flags & CAML_BA_MANAGED_MASK) == CAML_BA_FBSD_MBUF) {
      proxy->type = CAML_FREEBSD_MBUF;
      proxy->data = data;
      b->data = mtod((struct mbuf *) proxy->data, void *);
    }
/* Unmap the shared-grant mapping recorded in [share] (field 1 holds the
   bigarray view of the mapped pages).  Raises via failwith_xc on failure;
   builds without gntshr support call gntshr_missing() instead. */
CAMLprim value stub_xc_gntshr_munmap(value xgh, value share)
{
  CAMLparam2(xgh, share);
  CAMLlocal1(ml_map);
#ifdef HAVE_GNTSHR
  ml_map = Field(share, 1);
  {
    int nbytes = Caml_ba_array_val(ml_map)->dim[0];
    int npages = nbytes >> XC_PAGE_SHIFT;
    if (xc_gntshr_munmap(_G(xgh), Caml_ba_data_val(ml_map), npages) != 0)
      failwith_xc(_G(xgh));
  }
#else
  gntshr_missing();
#endif
  CAMLreturn(Val_unit);
}