/* wait4(2) wrapper: wait for a child process and return its status
   together with a 16-field resource-usage record built from struct rusage.
   [flags]   : OCaml list of wait flags, converted via wait_flag_table.
   [pid_req] : pid argument, with wait4(2) semantics.
   Raises Unix_error (via uerror) when wait4 fails. */
CAMLprim value oci_wait4(value flags, value pid_req)
{
  /* BUG FIX: the parameters were left unregistered (CAMLparam0()).
     Register them so the GC may move/scan them safely; this keeps the
     function correct even if allocation is later added before their
     last use. */
  CAMLparam2(flags, pid_req);
  CAMLlocal1(v_usage);
  int pid, status, cv_flags;
  struct rusage ru;

  cv_flags = convert_flag_list(flags, wait_flag_table);

  /* Release the OCaml runtime lock while blocked in the kernel. */
  enter_blocking_section();
  pid = wait4(Int_val(pid_req), &status, cv_flags, &ru);
  leave_blocking_section();
  if (pid == -1) uerror("wait4", pid_req);

  v_usage = caml_alloc(16, 0);
  /* CPU times are exposed as float seconds. */
  Store_field(v_usage, 0,
              caml_copy_double((double) ru.ru_utime.tv_sec
                               + (double) ru.ru_utime.tv_usec / 1e6));
  Store_field(v_usage, 1,
              caml_copy_double((double) ru.ru_stime.tv_sec
                               + (double) ru.ru_stime.tv_usec / 1e6));
  /* Remaining rusage counters are exposed as int64. */
  Store_field(v_usage, 2, caml_copy_int64(ru.ru_maxrss));
  Store_field(v_usage, 3, caml_copy_int64(ru.ru_ixrss));
  Store_field(v_usage, 4, caml_copy_int64(ru.ru_idrss));
  Store_field(v_usage, 5, caml_copy_int64(ru.ru_isrss));
  Store_field(v_usage, 6, caml_copy_int64(ru.ru_minflt));
  Store_field(v_usage, 7, caml_copy_int64(ru.ru_majflt));
  Store_field(v_usage, 8, caml_copy_int64(ru.ru_nswap));
  Store_field(v_usage, 9, caml_copy_int64(ru.ru_inblock));
  Store_field(v_usage, 10, caml_copy_int64(ru.ru_oublock));
  Store_field(v_usage, 11, caml_copy_int64(ru.ru_msgsnd));
  Store_field(v_usage, 12, caml_copy_int64(ru.ru_msgrcv));
  Store_field(v_usage, 13, caml_copy_int64(ru.ru_nsignals));
  Store_field(v_usage, 14, caml_copy_int64(ru.ru_nvcsw));
  Store_field(v_usage, 15, caml_copy_int64(ru.ru_nivcsw));

  CAMLreturn(alloc_process_status(pid, status, v_usage));
}
/* Capture the current bytecode call stack as an OCaml array of code
   pointers, truncated to at most [max_frames_value] frames.  Walks the
   interpreter stack twice: once to size the result, once to fill it. */
CAMLprim value caml_get_current_callstack(value max_frames_value) {
  CAMLparam1(max_frames_value);
  CAMLlocal1(trace);

  /* we use `intnat` here because, were it only `int`, passing
     `max_int` from the OCaml side would overflow on 64bits machines. */
  intnat max_frames = Long_val(max_frames_value);
  intnat trace_size;

  /* first compute the size of the trace */
  {
    value * sp = caml_extern_sp;
    intnat trap_spoff = caml_trap_sp_off;
    for (trace_size = 0; trace_size < max_frames; trace_size++) {
      code_t p = caml_next_frame_pointer(&sp, &trap_spoff);
      if (p == NULL) break;
    }
  }

  trace = caml_alloc(trace_size, 0);

  /* then collect the trace */
  {
    value * sp = caml_extern_sp;
    intnat trap_spoff = caml_trap_sp_off;
    uintnat trace_pos;
    for (trace_pos = 0; trace_pos < trace_size; trace_pos++) {
      code_t p = caml_next_frame_pointer(&sp, &trap_spoff);
      /* The first walk proved there are at least trace_size frames. */
      Assert(p != NULL);
      /* Code pointers are opaque words for the GC, so a raw Field
         assignment (no caml_modify write barrier) is intentional. */
      Field(trace, trace_pos) = Val_Codet(p);
    }
  }

  CAMLreturn(trace);
}
/* Allocate a fresh fiber stack of caml_init_fiber_wsz words and lay out
   its initial frames: an exception handler, a value handler, and a
   caml_context, so that returning or raising from the fiber transfers
   control back to the parent.
   [hval] : handler run on normal return of the fiber.
   [hexn] : handler run when the fiber raises.
   [heff] : handler run when the fiber performs an effect. */
value caml_alloc_stack (value hval, value hexn, value heff) {
  CAMLparam3(hval, hexn, heff);
  CAMLlocal1(stack);
  char* sp;
  struct caml_context *ctxt;

  stack = caml_alloc(caml_init_fiber_wsz, Stack_tag);
  Stack_dirty(stack) = Val_long(0);
  Stack_handle_value(stack) = hval;
  Stack_handle_exception(stack) = hexn;
  Stack_handle_effect(stack) = heff;
  Stack_parent(stack) = Val_unit;

  /* The stack grows downward from its high end. */
  sp = Stack_high(stack);

  /* Fiber exception handler that returns to parent */
  sp -= sizeof(value);
  *(value**)sp = (value*)caml_fiber_exn_handler;

  /* No previous exception frame */
  sp -= sizeof(value);
  *(uintnat*)sp = 0;

  /* Value handler that returns to parent */
  sp -= sizeof(value);
  *(value**)sp = (value*)caml_fiber_val_handler;

  /* Build a context */
  sp -= sizeof(struct caml_context);
  ctxt = (struct caml_context*)sp;
  /* Offset from the context to the exception frame pushed above. */
  ctxt->exception_ptr_offset = 2 * sizeof(value);
  ctxt->gc_regs = NULL;
  /* Record how much of the stack is in use (words pushed so far). */
  Stack_sp(stack) = 3 * sizeof(value) + sizeof(struct caml_context);

  caml_gc_log ("Allocate stack=0x%lx of %lu words\n", stack, caml_init_fiber_wsz);

  CAMLreturn (stack);
}
/* Load the "DBUG" section of the running bytecode executable and return
   an array of (relocated) debug event lists, or Val_false when the
   executable or the section cannot be found. */
static value read_debug_info(void)
{
  CAMLparam0();
  CAMLlocal1(events);
  char * exec_name;
  int fd;
  struct exec_trailer trail;
  struct channel * chan;
  uint32 num_events, orig, i;
  value evl, l;

  exec_name = caml_exe_name;
  fd = caml_attempt_open(&exec_name, &trail, 1);
  if (fd < 0) CAMLreturn(Val_false);
  caml_read_section_descriptors(fd, &trail);
  if (caml_seek_optional_section(fd, &trail, "DBUG") == -1) {
    close(fd);
    CAMLreturn(Val_false);
  }
  /* The channel takes ownership of fd; caml_close_channel below closes it. */
  chan = caml_open_descriptor_in(fd);
  num_events = caml_getword(chan);
  events = caml_alloc(num_events, 0);
  for (i = 0; i < num_events; i++) {
    orig = caml_getword(chan);
    evl = caml_input_val(chan);
    /* Relocate events in event list: add this module's code offset to
       each event's position.  EV_POS holds an immediate, so the raw
       Field write needs no write barrier. */
    for (l = evl; l != Val_int(0); l = Field(l, 1)) {
      value ev = Field(l, 0);
      Field(ev, EV_POS) = Val_long(Long_val(Field(ev, EV_POS)) + orig);
    }
    /* Record event list */
    Store_field(events, i, evl);
  }
  caml_close_channel(chan);
  CAMLreturn(events);
}
/* Fetch the current row's fields from the MonetDB MAPI handle and
   return them as [Some list-of-strings], or [None] when no field
   array is available. */
CAMLprim value mapi_fetch_field_list_stub(value handle)
{
  CAMLparam1(handle);
  CAMLlocal2(cli, cons);
  MapiHdl hdl = (MapiHdl) handle;
  char **fields = mapi_fetch_field_array(hdl);
  int nr_fields = mapi_get_field_count(hdl);

  if (fields == NULL) CAMLreturn(Val_none);

  /* Cons the cells back-to-front so the list comes out in field order. */
  cli = Val_emptylist;
  int idx = nr_fields;
  while (--idx >= 0) {
    cons = caml_alloc(2, 0);
    Store_field(cons, 0, caml_copy_string(fields[idx]));
    Store_field(cons, 1, cli);
    cli = cons;
  }
  CAMLreturn(Val_some(cli));
}
// -- Ocaml wrapper function returning an array of n double prec // -- random numbers in the open interval (0, 1). CAMLprim value wrap_rng_get_array(value vn) { CAMLparam1(vn); CAMLlocal1(ar); int j; // -- check constrains const int n = Int_val(vn); if ( n < 0 ) caml_invalid_argument("Rng.get_array: n must be positive or 0"); // -- OCaml does not allow for heap allocated zero-sized arrays; // -- return atom instead if (n == 0) CAMLreturn(Atom(0)); // -- allocate block and initialize ar = caml_alloc(n * NDBL, Double_array_tag); for (j = 0; j < n; j++) Store_double_field(ar, j, fac * rng_next()); CAMLreturn(ar); }
/* Look up the path [query_list] (an OCaml string list) for address [ip]
   in the MaxMind database [mmdb].  Returns a 2-field block
   (polymorphic-variant tag, payload).  Raises Failure via
   check_error / check_status / check_data on lookup problems. */
CAMLprim value mmdb_ml_lookup_path(value ip, value query_list, value mmdb)
{
  CAMLparam3(ip, query_list, mmdb);
  CAMLlocal3(iter_count, caml_clean_result, query_r);
  int total_len = 0, copy_count = 0, gai_error = 0, mmdb_error = 0;
  char *clean_result;
  long int int_result;

  iter_count = query_list;
  unsigned int len = caml_string_length(ip);
  char *as_string = caml_strdup(String_val(ip));
  if (strlen(as_string) != (size_t)len) {
    caml_stat_free(as_string);  /* don't leak on the error path */
    caml_failwith("Could not copy IP address properly");
  }

  MMDB_s *as_mmdb = (MMDB_s*)Data_custom_val(mmdb);
  MMDB_lookup_result_s *result = caml_stat_alloc(sizeof(*result));
  *result = MMDB_lookup_string(as_mmdb, as_string, &gai_error, &mmdb_error);
  /* Free the IP copy before check_error, which may raise. */
  caml_stat_free(as_string);
  check_error(gai_error, mmdb_error);

  /* Count the path components. */
  while (iter_count != Val_emptylist) {
    total_len++;
    iter_count = Field(iter_count, 1);
  }

  /* Build a NULL-terminated C string array for MMDB_aget_value. */
  char **query = caml_stat_alloc(sizeof(char *) * (total_len + 1));
  while (query_list != Val_emptylist) {
    query[copy_count] = caml_strdup(String_val(Field(query_list, 0)));
    copy_count++;
    query_list = Field(query_list, 1);
  }
  query[total_len] = NULL;

  MMDB_entry_data_s entry_data;
  int status = MMDB_aget_value(&result->entry, &entry_data,
                               (const char *const *const)query);

  /* Release the scratch memory before the checks, which may raise
     (entry_data points into the database mmap, not into these). */
  caml_stat_free(result);
  for (int i = 0; i < copy_count; i++) caml_stat_free(query[i]);
  caml_stat_free(query);
  check_status(status);
  check_data(entry_data);

  query_r = caml_alloc(2, 0);

  switch (entry_data.type) {
  case MMDB_DATA_TYPE_BYTES:
    clean_result = caml_stat_alloc(entry_data.data_size + 1);
    memcpy(clean_result, entry_data.bytes, entry_data.data_size);
    /* BUG FIX: the buffer was never NUL-terminated, so caml_copy_string
       could read past the copied bytes. */
    clean_result[entry_data.data_size] = '\0';
    caml_clean_result = caml_copy_string(clean_result);
    caml_stat_free(clean_result);
    goto string_finish;
  case MMDB_DATA_TYPE_UTF8_STRING:
    clean_result = strndup(entry_data.utf8_string, entry_data.data_size);
    caml_clean_result = caml_copy_string(clean_result);
    free(clean_result);
    goto string_finish;
  case MMDB_DATA_TYPE_FLOAT:
    Store_field(query_r, 0, polymorphic_variants.poly_float);
    Store_field(query_r, 1, caml_copy_double(entry_data.float_value));
    goto finish;
  case MMDB_DATA_TYPE_BOOLEAN:
    Store_field(query_r, 0, polymorphic_variants.poly_bool);
    /* BUG FIX: `Val_true ? entry_data.boolean : Val_false` always chose
       the raw C boolean, storing a non-value in the field. */
    Store_field(query_r, 1, Val_bool(entry_data.boolean));
    goto finish;
  case MMDB_DATA_TYPE_DOUBLE:
    Store_field(query_r, 0, polymorphic_variants.poly_float);
    Store_field(query_r, 1, caml_copy_double(entry_data.double_value));
    goto finish;
  case MMDB_DATA_TYPE_UINT16:
    Store_field(query_r, 0, polymorphic_variants.poly_int);
    int_result = Val_long(entry_data.uint16);
    goto int_finish;
  case MMDB_DATA_TYPE_UINT32:
    Store_field(query_r, 0, polymorphic_variants.poly_int);
    int_result = Val_long(entry_data.uint32);
    goto int_finish;
  case MMDB_DATA_TYPE_UINT64:
    Store_field(query_r, 0, polymorphic_variants.poly_int);
    /* BUG FIX: read .uint64, not .uint32 (the high 32 bits were
       silently dropped; values above Max_long still wrap in Val_long). */
    int_result = Val_long(entry_data.uint64);
    goto int_finish;
  case MMDB_DATA_TYPE_ARRAY:
  case MMDB_DATA_TYPE_MAP:
    caml_failwith("Can't return a Map or Array yet");
  default:
    /* Previously unmatched types fell through to string_finish with an
       uninitialized payload. */
    caml_failwith("Unsupported MMDB data type");
  }

 string_finish:
  Store_field(query_r, 0, polymorphic_variants.poly_string);
  Store_field(query_r, 1, caml_clean_result);
  CAMLreturn(query_r);

 int_finish:
  Store_field(query_r, 1, int_result);
  CAMLreturn(query_r);

 finish:
  CAMLreturn(query_r);
}
/* Wrap the raw pointer [t] in a fresh one-word Abstract_tag block and
   store it into [v].  Abstract blocks are opaque to the GC, so the
   field must be written directly: Store_field (caml_modify) is only
   valid for fields the GC scans and is wrong for Abstract_tag. */
static void affect(value& v, T const* t)
{
  v = caml_alloc(1, Abstract_tag);
  Field(v, 0) = reinterpret_cast<value>(t);
}
/* Block until termbox reports an event and translate it into the OCaml
   [event] variant described below.
   NOTE(review): the return value of tb_poll_event is not checked; on a
   poll error [e] may be unchanged/partial -- confirm against termbox docs. */
CAMLprim value tbstub_poll_event()
{
  CAMLparam0();
  CAMLlocal3(caml_e, caml_ch, caml_size);
  struct tb_event e;

  tb_poll_event(&e);

  // type event =
  //   | Key of key       -> block with tag 0
  //   | Ascii of char    -> block with tag 1
  //   | Utf8 of int32    -> block with tag 2
  //   | Resize of int * int -> block with tag 3
  if( e.type == TB_EVENT_KEY ) {
    // Key
    //
    // We deviate from tb_event definition of key here.
    // Some keys are really low enough to be considered ascii values.
    //
    // tb_poll_event reports ch as 0 whenever a 'key' is present.
    if( e.ch == 0 && e.key > 0xFF ) {
      caml_e = caml_alloc(1, 0);
      // We use a bit of a trick here to convert e.key to
      // type key =
      //   | F1          -> Val_int(0)
      //   ...
      //   | Arrow_right -> Val_int(21)
      //
      // All (non-ascii) TB_KEY_* values are defined as
      // (0xFFFF-0)(F1)...(0xFFFF-21)(ARROW_RIGHT).
      // Notice the pattern ->  ^         ^
      //
      // By "restoring" that offset number we get the int value that we
      // need to represent the variant.
      Store_field(caml_e, 0, Val_int(0xFFFF - e.key));
    }
    // Ascii
    //
    // tb_poll_event reports key as 0 whenever a ch is present.
    else if( e.ch <= 0xFF ) {
      caml_e = caml_alloc(1, 1);
      // Another bit of tricky code.
      // At this point, we know that either e.key < 255 && e.ch = 0
      // or e.key = 0 && e.ch < 255
      // So we just bitwise or the two values to get our ascii value.
      Store_field(caml_e, 0, Val_int(e.ch | e.key));
    }
    // Utf8
    else {
      // All else failed, so we need to represent the ch value as an
      // int32 block, since OCaml has no unicode support.
      caml_e = caml_alloc(1, 2);
      caml_ch = caml_copy_int32(e.ch);
      Store_field(caml_e, 0, caml_ch);
    }
  }
  // Resize
  else {
    caml_size = caml_alloc_tuple(2);
    Store_field(caml_size, 0, Val_int(e.w));
    Store_field(caml_size, 1, Val_int(e.h));
    caml_e = caml_alloc(1, 3);
    Store_field(caml_e, 0, caml_size);
  }

  CAMLreturn(caml_e);
}
/* Build an OCaml list of IPv4 routing-table entries read via libnl.
   Each entry is a 7-field block: destination IP (low/high 16-bit
   halves), netmask (low/high), gateway (low/high), device name.
   NOTE(review): errors abort the whole process with exit(1), and the
   netlink socket/caches are never freed -- confirm this is intended
   for this tool.  This definition continues beyond the visible chunk
   (the closing CAMLreturn is not shown here). */
CAMLprim value ocaml_get_routing_table(value unit)
{
  CAMLparam1(unit);
  CAMLlocal3( ret, tmp, entry );
  struct nl_sock *fd;
  struct nl_cache *res, *links;
  struct rtnl_route *it;
  uint32 i_ip, netmask = 0, mask_len, gw;
  int i;
  struct nl_addr *ip;
  char device_name[IFNAMSIZ];
  struct rtnl_nexthop *to;

  fd = nl_socket_alloc();
  if (!fd) {
    fprintf(stderr, "error nl_socket_alloc\n");
    exit(1);
  }
  if(nl_connect(fd, NETLINK_ROUTE) < 0) {
    fprintf(stderr, "error nl_connect\n");
    exit(1);
  }
  ret = Val_emptylist;
  if(rtnl_route_alloc_cache(fd, AF_UNSPEC, 0, &res) < 0) {
    fprintf(stderr, "error rtnl_route_alloc_cache");
    exit(1);
  }
  if(rtnl_link_alloc_cache (fd, AF_UNSPEC, &links) < 0) {
    fprintf(stderr, "error rtnl_link_alloc_cache");
    exit(1);
  }
  it = (struct rtnl_route *)nl_cache_get_first(res);
  for(; it != NULL;
      it = (struct rtnl_route *) nl_cache_get_next((struct nl_object *)it) ) {
    if(rtnl_route_get_family (it) == AF_INET) {
      ip = rtnl_route_get_dst(it);
      i_ip = ntohl(*(int *)nl_addr_get_binary_addr(ip));
      mask_len = nl_addr_get_prefixlen(ip);
      /* Build the netmask from the prefix length.  netmask is not reset
         between iterations, but 32 shifts of a 32-bit value flush any
         bits left over from the previous route. */
      for(i = 0; i < 32; i++)
        netmask = (netmask << 1) + (i< mask_len?1:0);
      to = rtnl_route_nexthop_n(it, 0);
      rtnl_link_i2name(links, rtnl_route_nh_get_ifindex(to),
                       device_name, IFNAMSIZ);
      if ( rtnl_route_nh_get_gateway (to) != NULL)
        gw = ntohl(*(int *)nl_addr_get_binary_addr(
                     rtnl_route_nh_get_gateway (to)));
      else
        gw = 0;
      /*printf("src ip:%x mask:%x gw:%x dev:%s\n", i_ip, netmask, */
      /*gw, device_name);*/
      /* 32-bit quantities are split into two 16-bit OCaml ints to stay
         within the tagged-int range on 32-bit platforms. */
      entry = caml_alloc(7,0);
      Store_field(entry, 0, Val_int(i_ip & 0xFFFF));
      Store_field(entry, 1, Val_int(i_ip >> 16));
      Store_field(entry, 2, Val_int(netmask & 0xFFFF));
      Store_field(entry, 3, Val_int(netmask >> 16));
      Store_field(entry, 4, Val_int(gw & 0xFFFF));
      Store_field(entry, 5, Val_int(gw >> 16));
      Store_field(entry, 6, caml_copy_string(device_name));
      // store in list
      tmp = caml_alloc(2, 0);
      Store_field( tmp, 0, entry); // head
      Store_field( tmp, 1, ret);   // tail
      ret = tmp;
    }
  }
/* recvmsg(2) wrapper: receive up to 4096 bytes on [fd_val] and return a
   pair (fd option, data), where the option carries a file descriptor
   passed as SCM_RIGHTS ancillary data, if any.
   Raises Unix_error on recvmsg failure or malformed ancillary data. */
CAMLprim value caml_extunix_recvmsg(value fd_val)
{
  CAMLparam1(fd_val);
  /* BUG FIX: [some_fd] was declared with CAMLlocal1 inside nested
     blocks.  CAMLlocal* pushes a root frame onto the local-roots
     chain; leaving the inner scope before CAMLreturn leaves the chain
     pointing at dead stack slots.  All locals are declared up front. */
  CAMLlocal3(data, res, some_fd);
  struct msghdr msg;
  int fd = Int_val(fd_val);
  int recvfd;
  ssize_t len;
  struct iovec iov[1];
  char buf[4096];

#if defined(CMSG_SPACE)
  union {
    struct cmsghdr cmsg; /* just for alignment */
    char control[CMSG_SPACE(sizeof recvfd)];
  } control_un;
  struct cmsghdr *cmsgp;
#endif

  /* BUG FIX: zero the whole msghdr in both configurations; the original
     only cleared it in the CMSG_SPACE branch, leaving msg_name etc.
     uninitialized in the msg_accrights fallback. */
  memset(&msg, 0, sizeof msg);

#if defined(CMSG_SPACE)
  msg.msg_control = control_un.control;
  msg.msg_controllen = CMSG_LEN(sizeof recvfd);
#else
  msg.msg_accrights = (caddr_t)&recvfd;
  msg.msg_accrightslen = sizeof recvfd;
#endif

  iov[0].iov_base = buf;
  iov[0].iov_len = sizeof buf;
  msg.msg_iov = iov;
  msg.msg_iovlen = 1;

  caml_enter_blocking_section();
  len = recvmsg(fd, &msg, 0);
  caml_leave_blocking_section();
  if (len == -1) uerror("recvmsg", Nothing);

  res = caml_alloc(2, 0);

#if defined(CMSG_SPACE)
  cmsgp = CMSG_FIRSTHDR(&msg);
  if (cmsgp == NULL) {
    Store_field(res, 0, Val_none);
  } else {
    if (cmsgp->cmsg_len != CMSG_LEN(sizeof recvfd))
      unix_error(EINVAL, "recvmsg", caml_copy_string("wrong descriptor size"));
    if (cmsgp->cmsg_level != SOL_SOCKET || cmsgp->cmsg_type != SCM_RIGHTS)
      unix_error(EINVAL, "recvmsg", caml_copy_string("invalid protocol"));
    some_fd = caml_alloc(1, 0);
    Store_field(some_fd, 0, Val_int(*(int *)CMSG_DATA(cmsgp)));
    Store_field(res, 0, some_fd);
  }
#else
  if (msg.msg_accrightslen != sizeof recvfd) {
    Store_field(res, 0, Val_none);
  } else {
    some_fd = caml_alloc(1, 0);
    Store_field(some_fd, 0, Val_int(recvfd));
    Store_field(res, 0, some_fd);
  }
#endif

  data = caml_alloc_string(len);
  memcpy(String_val(data), buf, len);
  Store_field(res, 1, data);

  CAMLreturn (res);
}
/* Read a dump file and return its variables as an OCaml list of
   (name, hostval) tuples, or Val_int(0) (the empty list) when the file
   cannot be opened or is empty.
   File layout (as read below): var count, then per variable: name
   length + bytes, shape length + int32 shape, a raw [value]-sized type
   word, byte count, payload bytes.
   NOTE(review): every fread return value is unchecked, the second
   early return leaks [ifile], and the file is opened in text mode
   ("r", not "rb") -- confirm whether these are acceptable here.
   Ownership of [shape] and [data] presumably transfers to
   build_ocaml_hostval; they are never freed in this function --
   TODO confirm against its implementation. */
value read_dump_file(value filename) {
  CAMLparam1(filename);
  CAMLlocal5(ocaml_var, ocaml_list_el1, ocaml_list_el2, ocaml_dyn_type,
             ocaml_dyn_aux);
  CAMLlocal2(ocaml_name, ocaml_hostval);

  printf("opening %s\n", String_val(filename));

  FILE *ifile = fopen(String_val(filename), "r");
  if (ifile == NULL) {
    ocaml_var = Val_int(0);
    CAMLreturn(ocaml_var);
  }
  int num_vars = 0;
  fread(&num_vars, sizeof(int), 1, ifile);
  if (num_vars == 0) {
    ocaml_var = Val_int(0);
    CAMLreturn(ocaml_var);
  }

  printf("parsing vars\n");
  ocaml_list_el2 = caml_alloc_tuple(2);
  int i, j;
  int name_len, shape_len, type, num_bytes;
  int *shape;
  char *name, *data;
  for (i = 0; i < num_vars; ++i) {
    // Allocate the variable tuple of (name, hostval)
    ocaml_var = caml_alloc_tuple(2);

    // Build the name
    fread(&name_len, sizeof(int), 1, ifile);
    ocaml_name = caml_alloc_string(name_len);
    fread(String_val(ocaml_name), sizeof(char), name_len, ifile);
    Store_field(ocaml_var, 0, ocaml_name);
    printf("got var name: %s\n", String_val(ocaml_name));

    // Build the shape
    fread(&shape_len, sizeof(int), 1, ifile);
    shape = (int*)malloc(shape_len * sizeof(int));
    fread(shape, sizeof(int), shape_len, ifile);
    printf("got shape of len %d\n", shape_len);

    // Build the DynType
    // NOTE(review): this reads a raw OCaml value word from the file;
    // presumably the dump stores an immediate (tagged int) constructor
    // -- a heap pointer here would be invalid.  TODO confirm.
    fread(&ocaml_dyn_type, sizeof(value), 1, ifile);
    // Wrap it in one VecT constructor per shape dimension.
    for (j = 0; j < shape_len; ++j) {
      ocaml_dyn_aux = ocaml_dyn_type;
      ocaml_dyn_type = caml_alloc(1, VecT);
      Store_field(ocaml_dyn_type, 0, ocaml_dyn_aux);
    }
    printf("built dyn_type\n");

    // Get the num_bytes
    fread(&num_bytes, sizeof(int), 1, ifile);

    // Get the payload
    data = (char*)malloc(num_bytes);
    fread(data, 1, num_bytes, ifile);
    printf("got payload of %d bytes\n", num_bytes);

    // Build the HostVal
    ocaml_hostval = build_ocaml_hostval(num_bytes, ocaml_dyn_type,
                                        shape, shape_len, data);
    printf("built hostval from inputs\n");
    Store_field(ocaml_var, 1, ocaml_hostval);

    // Insert the var into the list: the first cell's tail is the empty
    // list, later cells point at the previously finished cell.
    if (i == 0) {
      Store_field(ocaml_list_el2, 1, Val_int(0));
    } else {
      Store_field(ocaml_list_el2, 1, ocaml_list_el1);
    }
    Store_field(ocaml_list_el2, 0, ocaml_var);
    if (i < num_vars - 1) {
      ocaml_list_el1 = ocaml_list_el2;
      ocaml_list_el2 = caml_alloc_tuple(2);
    }
  }
  fclose(ifile);

  CAMLreturn(ocaml_list_el2);
}
/* C entry point that marshals adverb arguments into OCaml values and
   invokes the OCaml-side runner [ocaml_run_adverb].
   Returns the translated OCaml result (see translate_return_value).
   The printf calls are debug tracing inherited from the original. */
return_val_t run_adverb(
    char* adverb_name,
    int fn_id, host_val* fixed, int num_fixed,
    int combine_fn_id, int combine_provided,
    host_val* combine_fixed, int num_combine_fixed,
    host_val* init, int num_init,
    int axes_given, int* axes, int num_axes,
    host_val* array_positional, int num_array_positional,
    char** array_keyword_names, host_val* array_keyword_values,
    int num_array_keyword_values) {
  CAMLparam0();
  CAMLlocal1(fn_id_val);
  CAMLlocal1(combine_id_val_opt);
  CAMLlocal1(adverb);
  CAMLlocal2(fixed_actuals, combine_fixed_actuals);
  CAMLlocal1(array_actuals);
  CAMLlocal2(init_list, axes_list_option);
  CAMLlocal1(ocaml_result);

  fn_id_val = Val_int(fn_id);

  /* Some combine_fn_id when a combiner was supplied, else None. */
  if (combine_provided) {
    combine_id_val_opt = caml_alloc_tuple(1);
    Store_field(combine_id_val_opt, 0, Val_int(combine_fn_id));
  } else {
    combine_id_val_opt = Val_int(0);
  }

  /* BUG FIX: the original trace referenced an undeclared
     num_fixed_keywords; this wrapper passes no fixed keywords (see the
     mk_actual_args call just below). */
  printf("Making fixed args from %d fixed values\n", num_fixed);
  fixed_actuals = mk_actual_args(fixed, num_fixed, 0, 0, 0);
  printf("Making fixed args for combiner\n");
  combine_fixed_actuals = mk_actual_args(combine_fixed, num_combine_fixed,
                                         0, 0, 0);
  printf("Making array args from %d arrays and %d kwds\n",
         num_array_positional, num_array_keyword_values);
  array_actuals = mk_actual_args(array_positional, num_array_positional,
      array_keyword_names, array_keyword_values, num_array_keyword_values);
  printf("Building list of %d init args\n", num_init);
  init_list = build_host_val_list(init, num_init);
  printf("Axes given? %d\n", axes_given);
  if (axes_given) {
    printf("Building %d axes\n", num_axes);
    axes_list_option = caml_alloc(1, 0);
    Store_field( axes_list_option, 0, build_int_list(axes, num_axes) );
  } else {
    axes_list_option = Val_int(0);
  }
  printf("Calling into OCaml\n");
  adverb = get_adverb(adverb_name);
  value func_args[8] = {
    adverb, fn_id_val, fixed_actuals, combine_id_val_opt,
    combine_fixed_actuals, init_list, axes_list_option, array_actuals
  };
  /* BUG FIX: all 8 arguments must be passed; the original passed 7 and
     silently dropped array_actuals. */
  ocaml_result = caml_callbackN(*ocaml_run_adverb, 8, func_args);
  printf("Returned from OCaml\n");
  CAMLreturnT(return_val_t, translate_return_value(ocaml_result));
}
CAMLprim value pcre_exec_stub0( intnat v_opt, value v_rex, intnat v_pos, intnat v_subj_start, value v_subj, value v_ovec, value v_maybe_cof, value v_workspace) { int ret; int is_dfa = v_workspace != (value) NULL; long pos = v_pos, len = caml_string_length(v_subj), subj_start = v_subj_start; long ovec_len = Wosize_val(v_ovec); if (pos > len || pos < subj_start) caml_invalid_argument("Pcre.pcre_exec_stub: illegal position"); if (subj_start > len || subj_start < 0) caml_invalid_argument("Pcre.pcre_exec_stub: illegal subject start"); pos -= subj_start; len -= subj_start; { const pcre *code = get_rex(v_rex); /* Compiled pattern */ const pcre_extra *extra = get_extra(v_rex); /* Extra info */ const char *ocaml_subj = String_val(v_subj) + subj_start; /* Subject string */ const int opt = v_opt; /* Runtime options */ /* Special case when no callout functions specified */ if (v_maybe_cof == None) { int *ovec = (int *) &Field(v_ovec, 0); /* Performs the match */ if (is_dfa) ret = pcre_dfa_exec(code, extra, ocaml_subj, len, pos, opt, ovec, ovec_len, (int *) &Field(v_workspace, 0), Wosize_val(v_workspace)); else ret = pcre_exec(code, extra, ocaml_subj, len, pos, opt, ovec, ovec_len); if (ret < 0) handle_exec_error("pcre_exec_stub", ret); else handle_pcre_exec_result(ovec, v_ovec, ovec_len, subj_start, ret); } /* There are callout functions */ else { value v_cof = Field(v_maybe_cof, 0); value v_substrings; char *subj = caml_stat_alloc(sizeof(char) * len); int *ovec = caml_stat_alloc(sizeof(int) * ovec_len); int workspace_len; int *workspace; struct cod cod = { 0, (value *) NULL, (value *) NULL, (value) NULL }; struct pcre_extra new_extra = #ifdef PCRE_EXTRA_MATCH_LIMIT_RECURSION # ifdef PCRE_EXTRA_MARK # ifdef PCRE_EXTRA_EXECUTABLE_JIT { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, NULL, NULL, 0, NULL, NULL }; # else { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, NULL, NULL, 0, NULL }; # endif # else { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, NULL, NULL, 0 }; # endif #else { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, 
NULL, NULL }; #endif cod.subj_start = subj_start; memcpy(subj, ocaml_subj, len); Begin_roots4(v_rex, v_cof, v_substrings, v_ovec); Begin_roots1(v_subj); v_substrings = caml_alloc_small(2, 0); End_roots(); Field(v_substrings, 0) = v_subj; Field(v_substrings, 1) = v_ovec; cod.v_substrings_p = &v_substrings; cod.v_cof_p = &v_cof; new_extra.callout_data = &cod; if (extra != NULL) { new_extra.flags = PCRE_EXTRA_CALLOUT_DATA | extra->flags; new_extra.study_data = extra->study_data; new_extra.match_limit = extra->match_limit; new_extra.tables = extra->tables; #ifdef PCRE_EXTRA_MATCH_LIMIT_RECURSION new_extra.match_limit_recursion = extra->match_limit_recursion; #endif } if (is_dfa) { workspace_len = Wosize_val(v_workspace); workspace = caml_stat_alloc(sizeof(int) * workspace_len); ret = pcre_dfa_exec(code, extra, subj, len, pos, opt, ovec, ovec_len, (int *) &Field(v_workspace, 0), workspace_len); } else ret = pcre_exec(code, &new_extra, subj, len, pos, opt, ovec, ovec_len); caml_stat_free(subj); End_roots(); if (ret < 0) { if (is_dfa) caml_stat_free(workspace); caml_stat_free(ovec); if (ret == PCRE_ERROR_CALLOUT) caml_raise(cod.v_exn); else handle_exec_error("pcre_exec_stub(callout)", ret); } else { handle_pcre_exec_result(ovec, v_ovec, ovec_len, subj_start, ret); if (is_dfa) { caml_int_ptr ocaml_workspace_dst = (caml_int_ptr) &Field(v_workspace, 0); const int *workspace_src = workspace; const int *workspace_src_stop = workspace + workspace_len; while (workspace_src != workspace_src_stop) { *ocaml_workspace_dst = *workspace_src; ocaml_workspace_dst++; workspace_src++; } caml_stat_free(workspace); } caml_stat_free(ovec); } } } return Val_unit; } CAMLprim value pcre_exec_stub( intnat v_opt, value v_rex, intnat v_pos, intnat v_subj_start, value v_subj, value v_ovec, value v_maybe_cof) { return pcre_exec_stub0(v_opt, v_rex, v_pos, v_subj_start, v_subj, v_ovec, v_maybe_cof, (value) NULL); } /* Byte-code hook for pcre_exec_stub Needed, because there are more than 5 arguments */ 
/* Bytecode wrapper: unbox the immediate arguments and forward with a
   NULL workspace (non-DFA). */
CAMLprim value pcre_exec_stub_bc(value *argv, int __unused argn)
{
  return
    pcre_exec_stub0(
      Int_val(argv[0]), argv[1], Int_val(argv[2]), Int_val(argv[3]),
      argv[4], argv[5], argv[6], (value) NULL);
}

/* Byte-code hook for pcre_dfa_exec_stub
   Needed, because there are more than 5 arguments */
CAMLprim value pcre_dfa_exec_stub_bc(value *argv, int __unused argn)
{
  return
    pcre_exec_stub0(
      Int_val(argv[0]), argv[1], Int_val(argv[2]), Int_val(argv[3]),
      argv[4], argv[5], argv[6], argv[7]);
}

/* Custom-block operations for chartable sets: only the finalizer is
   non-default. */
static struct custom_operations tables_ops = {
  "pcre_ocaml_tables",
  pcre_dealloc_tables,
  custom_compare_default,
  custom_hash_default,
  custom_serialize_default,
  custom_deserialize_default,
  custom_compare_ext_default
};

/* Generates a new set of chartables for the current locale (see man
   page of PCRE */
CAMLprim value pcre_maketables_stub(value __unused v_unit)
{
  /* GC will do a full cycle every 1_000_000 table set allocations
     (one table set consumes 864 bytes -> maximum of 864_000_000 bytes
     unreclaimed table sets) */
  const value v_tables =
    caml_alloc_custom(
      &tables_ops, sizeof(struct pcre_ocaml_tables), 1, 1000000);
  set_tables(v_tables, pcre_maketables());
  return v_tables;
}

/* Wraps around the isspace-function */
CAMLprim value pcre_isspace_stub(value v_c)
{
  return Val_bool(isspace(Int_val(v_c)));
}

/* Returns number of substring associated with a name.
   Raises Invalid_argument when the name is unknown. */
CAMLprim intnat pcre_get_stringnumber_stub(value v_rex, value v_name)
{
  const int ret = pcre_get_stringnumber(get_rex(v_rex), String_val(v_name));
  if (ret == PCRE_ERROR_NOSUBSTRING)
    caml_invalid_argument("Named string not found");
  return ret;
}

/* Bytecode wrapper boxing the unboxed-int result. */
CAMLprim value pcre_get_stringnumber_stub_bc(value v_rex, value v_name)
{
  return Val_int(pcre_get_stringnumber_stub(v_rex, v_name));
}

/* Returns array of names of named substrings in a regexp */
CAMLprim value pcre_names_stub(value v_rex)
{
  CAMLparam0();
  CAMLlocal1(v_res);
  int name_count;
  int entry_size;
  const char *tbl_ptr;
  int i;

  int ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMECOUNT, &name_count);
  if (ret != 0) raise_internal_error("pcre_names_stub: namecount");

  ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMEENTRYSIZE, &entry_size);
  if (ret != 0) raise_internal_error("pcre_names_stub: nameentrysize");

  ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMETABLE, &tbl_ptr);
  if (ret != 0) raise_internal_error("pcre_names_stub: nametable");

  v_res = caml_alloc(name_count, 0);

  for (i = 0; i < name_count; ++i) {
    /* Each name-table entry starts with a 2-byte group number followed
       by the NUL-terminated name. */
    value v_name = caml_copy_string(tbl_ptr + 2);
    Store_field(v_res, i, v_name);
    tbl_ptr += entry_size;
  }

  CAMLreturn(v_res);
}

/* Generic stub for getting integer results from pcre_config */
static inline int pcre_config_int(int what)
{
  int ret;
  pcre_config(what, (void *) &ret);
  return ret;
}

/* Generic stub for getting long integer results from pcre_config */
/* NOTE(review): the long result is truncated to int on return --
   confirm callers only use it for values that fit in an int. */
static inline int pcre_config_long(int what)
{
  long ret;
  pcre_config(what, (void *) &ret);
  return ret;
}
/* Repeatedly apply the first sorting reversal transforming the genome
   in [genes1] toward [genes2] and return the reversals performed as an
   OCaml list of (start+1, stop) pairs (most recent first).
   [c_num_genes] is the gene count; [dist] is read but unused beyond
   initializing inv_dist (kept for interface compatibility).
   Note: mutates the genome stored in [genes1] in place. */
value grappa_CAML_inversions (value genes1, value genes2,
                              value c_num_genes, value dist)
{
  CAMLparam4(genes1, genes2, c_num_genes, dist);
  CAMLlocal3(resulttmp, result, r);
  List intermediate_reversals_list;
  int num_genes, i, /*j,*/ inv_dist;
  //struct genome_arr_t *genes1_arr, *genes2_arr;
  struct genome_struct *permutation, *origin;
  int *temp_genes;
  Reversal *rev;
  Reversal revrev;

  result = Val_int(0); /* We start with the empty list */
  inv_dist = Int_val(dist);
  permutation = (struct genome_struct *) Data_custom_val (genes1);
  origin = (struct genome_struct *) Data_custom_val (genes2);
  /* First one in should be ancestor-- the permutation that you want to
     transform into the descendant (even though "origin" is a confusing
     thing to call descendant) */
  num_genes = Int_val(c_num_genes);

  if (0 == num_genes) CAMLreturn(result);

  /* BUG FIX: allocate the scratch buffer only after the early-out above
     so the num_genes == 0 path cannot leak it. */
  temp_genes = (int *)malloc(num_genes * sizeof(int));
  if (temp_genes == NULL)
    caml_failwith("grappa_CAML_inversions: out of memory");

  /* Initialize list that will be used to store the sorting reversals
     found between the permutations at each step. */
  init_list(&intermediate_reversals_list, (num_genes + 1) * num_genes,
            sizeof(Reversal *));

  i = 0;
  do {
    clear_list(&intermediate_reversals_list);

    find_all_sorting_reversals(&intermediate_reversals_list, NULL,
                               permutation, origin, num_genes, NULL);

    if (list_size(&intermediate_reversals_list) > 0) {
      revrev = list_get(&intermediate_reversals_list, 0).revelement;
      rev = &revrev;
      /* Apply the reversal to the working permutation. */
      copy_with_reversal(temp_genes, permutation->genes, num_genes, rev);
      permcopy(permutation->genes, temp_genes, num_genes);
      /* Cons (start+1, stop) onto the result list. */
      r = caml_alloc_tuple(2);
      Store_field(r, 0, Val_int(rev->start + 1));
      Store_field(r, 1, Val_int(rev->stop));
      resulttmp = caml_alloc(2, 0);
      Store_field(resulttmp, 0, r);
      Store_field(resulttmp, 1, result);
      result = resulttmp;
    }
    i++;
  } while (list_size(&intermediate_reversals_list) > 0);

  /* BUG FIX: temp_genes was never freed on any path. */
  free(temp_genes);

  fflush(stdout); /* Change so can be stderr, too? */
  CAMLreturn(result);
}
/* Allocate an OCaml tuple (a tag-0 block) of [n] fields.  caml_alloc
   handles n == 0 (returning the preallocated atom) and initializes the
   fields so the GC never sees uninitialized words. */
CAMLexport value caml_alloc_tuple(mlsize_t n)
{
  return caml_alloc(n, 0);
}
/* Executes a pattern match with runtime options, a regular expression,
   a string offset, a string length, a subject string, a number of
   subgroup offsets, an offset vector and an optional callout function */
CAMLprim value pcre_exec_stub(value v_opt, value v_rex, value v_ofs,
                              value v_subj, value v_subgroups2,
                              value v_ovec, value v_maybe_cof)
{
  const int ofs = Int_val(v_ofs), len = caml_string_length(v_subj);

  if (ofs > len || ofs < 0)
    caml_invalid_argument("Pcre.pcre_exec_stub: illegal offset");

  {
    const pcre *code = (pcre *) Field(v_rex, 1);  /* Compiled pattern */
    const pcre_extra *extra = (pcre_extra *) Field(v_rex, 2);  /* Extra info */
    const char *ocaml_subj = String_val(v_subj);  /* Subject string */
    const int opt = Int_val(v_opt);  /* Runtime options */
    int subgroups2 = Int_val(v_subgroups2);
    const int subgroups2_1 = subgroups2 - 1;
    /* PCRE wants an ovec of 3 ints per capture pair (2 offsets + work
       area): subgroups3 = subgroups2 * 3/2. */
    const int subgroups3 = (subgroups2 >> 1) + subgroups2;

    /* Special case when no callout functions specified */
    if (v_maybe_cof == None) {
      int *ovec = (int *) &Field(v_ovec, 0);

      /* Performs the match */
      const int ret =
        pcre_exec(code, extra, ocaml_subj, len, ofs, opt, ovec, subgroups3);

      if (ret < 0) {
        switch(ret) {
          case PCRE_ERROR_NOMATCH :
            caml_raise_constant(*pcre_exc_Not_found);
          case PCRE_ERROR_PARTIAL :
            caml_raise_constant(*pcre_exc_Partial);
          case PCRE_ERROR_MATCHLIMIT :
            caml_raise_constant(*pcre_exc_MatchLimit);
          case PCRE_ERROR_BADPARTIAL :
            caml_raise_constant(*pcre_exc_BadPartial);
          case PCRE_ERROR_BADUTF8 :
            caml_raise_constant(*pcre_exc_BadUTF8);
          case PCRE_ERROR_BADUTF8_OFFSET :
            caml_raise_constant(*pcre_exc_BadUTF8Offset);
          default :
            caml_raise_with_string(*pcre_exc_InternalError,
                                   "pcre_exec_stub");
        }
      }
      else {
        /* In-place conversion of the match offsets from C ints to
           tagged OCaml ints, walking from the high end downward so the
           widening never clobbers an unread source int. */
        const int *ovec_src = ovec + subgroups2_1;
        long int *ovec_dst = (long int *) ovec + subgroups2_1;

        /* Converts offsets from C-integers to OCaml-Integers.  This is
           a bit tricky, because there are 32- and 64-bit platforms
           around and OCaml chooses the larger possibility for
           representing integers when available (also in arrays) -
           not so the PCRE */
        while (subgroups2--) {
          *ovec_dst = Val_int(*ovec_src);
          --ovec_src; --ovec_dst;
        }
      }
    }

    /* There are callout functions: PCRE may call back into OCaml (and
       run the GC), so the subject and ovec are copied to C memory. */
    else {
      value v_cof = Field(v_maybe_cof, 0);
      value v_substrings;
      char *subj = caml_stat_alloc(sizeof(char) * len);
      int *ovec = caml_stat_alloc(sizeof(int) * subgroups3);
      int ret;
      struct cod cod = { (value *) NULL, (value *) NULL, (value) NULL };
      struct pcre_extra new_extra =
#ifdef PCRE_CONFIG_MATCH_LIMIT_RECURSION
        { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, NULL, NULL, 0 };
#else
        { PCRE_EXTRA_CALLOUT_DATA, NULL, 0, NULL, NULL };
#endif

      memcpy(subj, ocaml_subj, len);

      Begin_roots3(v_rex, v_cof, v_substrings);
        Begin_roots2(v_subj, v_ovec);
          v_substrings = caml_alloc_small(2, 0);
        End_roots();

      Field(v_substrings, 0) = v_subj;
      Field(v_substrings, 1) = v_ovec;

      cod.v_substrings_p = &v_substrings;
      cod.v_cof_p = &v_cof;
      new_extra.callout_data = &cod;

      if (extra == NULL) {
        ret = pcre_exec(code, &new_extra, subj, len, ofs, opt,
                        ovec, subgroups3);
      }
      else {
        new_extra.flags = PCRE_EXTRA_CALLOUT_DATA | extra->flags;
        new_extra.study_data = extra->study_data;
        new_extra.match_limit = extra->match_limit;
        new_extra.tables = extra->tables;
#ifdef PCRE_CONFIG_MATCH_LIMIT_RECURSION
        new_extra.match_limit_recursion = extra->match_limit_recursion;
#endif
        ret = pcre_exec(code, &new_extra, subj, len, ofs, opt,
                        ovec, subgroups3);
      }

      /* NOTE(review): subj/ovec were allocated with caml_stat_alloc but
         are released with plain free() here and below; this works only
         while caml_stat_alloc is a thin malloc wrapper -- confirm, or
         switch to caml_stat_free. */
      free(subj);
      End_roots();

      if (ret < 0) {
        free(ovec);
        switch(ret) {
          case PCRE_ERROR_NOMATCH :
            caml_raise_constant(*pcre_exc_Not_found);
          case PCRE_ERROR_PARTIAL :
            caml_raise_constant(*pcre_exc_Partial);
          case PCRE_ERROR_MATCHLIMIT :
            caml_raise_constant(*pcre_exc_MatchLimit);
          case PCRE_ERROR_BADPARTIAL :
            caml_raise_constant(*pcre_exc_BadPartial);
          case PCRE_ERROR_BADUTF8 :
            caml_raise_constant(*pcre_exc_BadUTF8);
          case PCRE_ERROR_BADUTF8_OFFSET :
            caml_raise_constant(*pcre_exc_BadUTF8Offset);
          case PCRE_ERROR_CALLOUT :
            caml_raise(cod.v_exn);
          default :
            caml_raise_with_string(*pcre_exc_InternalError,
                                   "pcre_exec_stub");
        }
      }
      else {
        /* Copy the offsets from the C buffer into the OCaml array,
           tagging as we go (high end first, as above). */
        int *ovec_src = ovec + subgroups2_1;
        long int *ovec_dst = &Field(v_ovec, 0) + subgroups2_1;

        while (subgroups2--) {
          *ovec_dst = Val_int(*ovec_src);
          --ovec_src; --ovec_dst;
        }

        free(ovec);
      }
    }
  }

  return Val_unit;
}

/* Byte-code hook for pcre_exec_stub
   Needed, because there are more than 5 arguments */
CAMLprim value pcre_exec_stub_bc(value *argv, int __unused argn)
{
  return pcre_exec_stub(argv[0], argv[1], argv[2], argv[3],
                        argv[4], argv[5], argv[6]);
}

/* Generates a new set of chartables for the current locale (see man
   page of PCRE */
CAMLprim value pcre_maketables_stub(value __unused v_unit)
{
  /* GC will do a full cycle every 100 table set allocations (one table
     set consumes 864 bytes -> maximum of 86400 bytes unreclaimed table
     sets) */
  const value v_res = caml_alloc_final(2, pcre_dealloc_tables, 864, 86400);
  Field(v_res, 1) = (value) pcre_maketables();
  return v_res;
}

/* Wraps around the isspace-function */
CAMLprim value pcre_isspace_stub(value v_c)
{
  return Val_bool(isspace(Int_val(v_c)));
}

/* Returns number of substring associated with a name.
   Raises Invalid_argument when the name is unknown. */
CAMLprim value pcre_get_stringnumber_stub(value v_rex, value v_name)
{
  const int ret = pcre_get_stringnumber((pcre *) Field(v_rex, 1),
                                        String_val(v_name));
  if (ret == PCRE_ERROR_NOSUBSTRING)
    caml_invalid_argument("Named string not found");
  return Val_int(ret);
}

/* Returns array of names of named substrings in a regexp */
CAMLprim value pcre_names_stub(value v_rex)
{
  CAMLparam0();
  CAMLlocal1(v_res);
  int name_count;
  int entry_size;
  const char *tbl_ptr;
  int i;

  int ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMECOUNT, &name_count);
  if (ret != 0)
    caml_raise_with_string(*pcre_exc_InternalError, "pcre_names_stub");

  ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMEENTRYSIZE, &entry_size);
  if (ret != 0)
    caml_raise_with_string(*pcre_exc_InternalError, "pcre_names_stub");

  ret = pcre_fullinfo_stub(v_rex, PCRE_INFO_NAMETABLE, &tbl_ptr);
  if (ret != 0)
    caml_raise_with_string(*pcre_exc_InternalError, "pcre_names_stub");

  v_res = caml_alloc(name_count, 0);

  for (i = 0; i < name_count; ++i) {
    /* Each name-table entry: 2-byte group number, then the NUL-
       terminated name. */
    value v_name = caml_copy_string(tbl_ptr + 2);
    Store_field(v_res, i, v_name);
    tbl_ptr += entry_size;
  }

  CAMLreturn(v_res);
}
CAMLprim value win_stat(value path, value wpath) { int res, mode; HANDLE h; BY_HANDLE_FILE_INFORMATION info; CAMLparam2(path,wpath); CAMLlocal1 (v); h = CreateFileW ((LPCWSTR) String_val (wpath), 0, 0, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS | FILE_ATTRIBUTE_READONLY, NULL); if (h == INVALID_HANDLE_VALUE) { win32_maperr (GetLastError ()); uerror("stat", path); } res = GetFileInformationByHandle (h, &info); if (res == 0) { win32_maperr (GetLastError ()); (void) CloseHandle (h); uerror("stat", path); } res = CloseHandle (h); if (res == 0) { win32_maperr (GetLastError ()); uerror("stat", path); } v = caml_alloc (12, 0); Store_field (v, 0, Val_int (info.dwVolumeSerialNumber)); // Apparently, we cannot trust the inode number to be stable when // nFileIndexHigh is 0. if (info.nFileIndexHigh == 0) info.nFileIndexLow = 0; /* The ocaml code truncates inode numbers to 31 bits. We hash the low and high parts in order to lose as little information as possible. */ Store_field (v, 1, Val_int (MAKEDWORDLONG(info.nFileIndexLow,info.nFileIndexHigh)+155825701*((DWORDLONG)info.nFileIndexHigh))); Store_field (v, 2, Val_int (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY ? 1: 0)); mode = 0000444; if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) mode |= 0000111; if (!(info.dwFileAttributes & FILE_ATTRIBUTE_READONLY)) mode |= 0000222; Store_field (v, 3, Val_int(mode)); Store_field (v, 4, Val_int (info.nNumberOfLinks)); Store_field (v, 5, Val_int (0)); Store_field (v, 6, Val_int (0)); Store_field (v, 7, Val_int (0)); Store_field (v, 8, copy_int64(MAKEDWORDLONG(info.nFileSizeLow,info.nFileSizeHigh))); Store_field (v, 9, copy_double((double) FILETIME_TO_TIME(info.ftLastAccessTime))); Store_field (v, 10, copy_double((double) FILETIME_TO_TIME(info.ftLastWriteTime))); Store_field (v, 11, copy_double((double) FILETIME_TO_TIME(info.ftCreationTime))); CAMLreturn (v); }
/* Build the OCaml-side descriptor for CUDA device ordinal [i].
 *
 * Returns a 4-field block:
 *   0: general_info  -- name, total memory, shared mem, clock, const mem,
 *                       multiprocessor count, ECC flag, ordinal, and the
 *                       raw spoc_cu_context pointer (field 8)
 *   1: cuda_info     -- one-field block wrapping specific_info
 *   2: raw pointer to a freshly malloc'd spoc_cuda_gc_info
 *   3: raw pointer to the (initially NULL) cuda_event_list
 *
 * Raises the registered "no_cuda_device" exception when [i] is out of
 * range.  NOTE(review): fields 2/3 and general_info field 8 store raw C
 * pointers inside tag-0 blocks that the GC scans as values; this relies
 * on the pointers looking like out-of-heap values -- confirm against the
 * rest of the SPOC runtime. */
value spoc_getCudaDevice(value i) {
	CAMLparam1(i);
	CAMLlocal4(general_info, cuda_info, specific_info, gc_info);
	CAMLlocal3(device, maxT, maxG);
	int nb_devices;
	CUdevprop dev_infos;
	CUdevice dev;
	CUcontext ctx;
	CUstream queue[2];
	spoc_cu_context *spoc_ctx;
	//CUcontext gl_ctx;
	char infoStr[1024];
	int infoInt;
	size_t infoUInt;
	size_t totalMem;	/* cuMemGetInfo needs a real 'total' out param */
	int major, minor;
	enum cudaError_enum cuda_error;	/* used by CUDA_CHECK_CALL */

	cuDeviceGetCount (&nb_devices);
	/* BUGFIX: valid ordinals are 0 .. nb_devices-1; the original used
	 * '>' and wrongly accepted ordinal nb_devices itself. */
	if ((Int_val(i)) >= nb_devices)
		raise_constant(*caml_named_value("no_cuda_device")) ;
	CUDA_CHECK_CALL(cuDeviceGet(&dev, Int_val(i)));
	CUDA_CHECK_CALL(cuDeviceGetProperties(&dev_infos, dev));

	general_info = caml_alloc (9, 0);
	CUDA_CHECK_CALL(cuDeviceGetName(infoStr, sizeof(infoStr), dev));
	Store_field(general_info,0, copy_string(infoStr));	/* device name */
	CUDA_CHECK_CALL(cuDeviceTotalMem(&infoUInt, dev));
	Store_field(general_info,1, Val_int(infoUInt));		/* total global mem */
	Store_field(general_info,2, Val_int(dev_infos.sharedMemPerBlock));
	Store_field(general_info,3, Val_int(dev_infos.clockRate));
	Store_field(general_info,4, Val_int(dev_infos.totalConstantMemory));
	CUDA_CHECK_CALL(cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev));
	Store_field(general_info,5, Val_int(infoInt));
	CUDA_CHECK_CALL(cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_ECC_ENABLED, dev));
	Store_field(general_info,6, Val_bool(infoInt));
	Store_field(general_info,7, i);				/* device ordinal */

	CUDA_CHECK_CALL(cuCtxCreate (&ctx, CU_CTX_SCHED_BLOCKING_SYNC | CU_CTX_MAP_HOST, dev));
	/* BUGFIX: allocate the CUDA context struct, not the OpenCL one.
	 * The original malloc'd sizeof(spoc_cl_context) -- a copy-paste slip
	 * that under-allocates whenever the two structs differ in size. */
	spoc_ctx = malloc(sizeof *spoc_ctx);
	if (spoc_ctx == NULL) caml_raise_out_of_memory();
	spoc_ctx->ctx = ctx;
	CUDA_CHECK_CALL(cuStreamCreate(&queue[0], 0));
	CUDA_CHECK_CALL(cuStreamCreate(&queue[1], 0));
	spoc_ctx->queue[0] = queue[0];
	spoc_ctx->queue[1] = queue[1];
	Store_field(general_info,8, (value)spoc_ctx);
	CUDA_CHECK_CALL(cuCtxSetCurrent(ctx));

	cuda_info = caml_alloc(1, 0); /* variant slot 0 -> Cuda */
	specific_info = caml_alloc(18, 0);

	cuDeviceComputeCapability(&major, &minor, dev);
	Store_field(specific_info,0, Val_int(major));
	Store_field(specific_info,1, Val_int(minor));
	Store_field(specific_info,2, Val_int(dev_infos.regsPerBlock));
	Store_field(specific_info,3, Val_int(dev_infos.SIMDWidth));
	Store_field(specific_info,4, Val_int(dev_infos.memPitch));
	Store_field(specific_info,5, Val_int(dev_infos.maxThreadsPerBlock));

	maxT = caml_alloc(3, 0);	/* max threads per block, per dimension */
	Store_field(maxT,0, Val_int(dev_infos.maxThreadsDim[0]));
	Store_field(maxT,1, Val_int(dev_infos.maxThreadsDim[1]));
	Store_field(maxT,2, Val_int(dev_infos.maxThreadsDim[2]));
	Store_field(specific_info,6, maxT);

	maxG = caml_alloc(3, 0);	/* max grid size, per dimension */
	Store_field(maxG,0, Val_int(dev_infos.maxGridSize[0]));
	Store_field(maxG,1, Val_int(dev_infos.maxGridSize[1]));
	Store_field(maxG,2, Val_int(dev_infos.maxGridSize[2]));
	Store_field(specific_info,7, maxG);

	Store_field(specific_info,8, Val_int(dev_infos.textureAlign));
	/* The attribute queries below deliberately ignore errors, matching
	 * the original behaviour (infoInt keeps its previous value). */
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, dev);
	Store_field(specific_info,9, Val_bool(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, dev);
	Store_field(specific_info,10, Val_bool(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_INTEGRATED, dev);
	Store_field(specific_info,11, Val_bool(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, dev);
	Store_field(specific_info,12, Val_bool(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_COMPUTE_MODE, dev);
	Store_field(specific_info,13, Val_int(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS, dev);
	Store_field(specific_info,14, Val_bool(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, dev);
	Store_field(specific_info,15, Val_int(infoInt));
	cuDeviceGetAttribute(&infoInt, CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, dev);
	Store_field(specific_info,16, Val_int(infoInt));
	cuDriverGetVersion(&infoInt);
	Store_field(specific_info, 17, Val_int(infoInt));

	Store_field(cuda_info, 0, specific_info);

	device = caml_alloc(4, 0);
	Store_field(device, 0, general_info);
	Store_field(device, 1, cuda_info);
	{
		spoc_cuda_gc_info* gcInfo = (spoc_cuda_gc_info*)malloc(sizeof(spoc_cuda_gc_info));
		if (gcInfo == NULL) caml_raise_out_of_memory();
		/* BUGFIX: cuMemGetInfo takes two mandatory out parameters
		 * (free, total); the original passed NULL for 'total'. */
		CUDA_CHECK_CALL(cuMemGetInfo(&infoUInt, &totalMem));
		/* keep a 32 MiB safety margin off the reported free memory */
		infoUInt -= (32*1024*1024);
		Store_field(device, 2, (value)gcInfo);
		{
			cuda_event_list* events = NULL;
			Store_field(device, 3, (value)events);
			CAMLreturn(device);
		}
	}
}
UWT_LOCAL void uwt__gr_enlarge__(void) { CAMLparam0(); CAMLlocal1(nroot); cb_t i; cb_t * t; if ( uwt__global_caml_root == Val_unit ){ enum { AR_INIT_SIZE = 2}; nroot = caml_alloc(GR_ROOT_INIT_SIZE,0); for ( i = 0 ; i < GR_ROOT_INIT_SIZE ; ++i ){ Field(nroot,i) = Val_unit; } uwt__global_caml_root = caml_alloc_small(AR_INIT_SIZE,0); Field(uwt__global_caml_root,0) = nroot; for ( i = 1 ; i < AR_INIT_SIZE ; ++i ){ Field(uwt__global_caml_root,i) = Val_unit; } t = malloc(AR_INIT_SIZE * GR_ROOT_INIT_SIZE * sizeof(*t)); if ( t == NULL ){ caml_raise_out_of_memory(); } for ( i = 0; i < GR_ROOT_INIT_SIZE; ++i ){ t[i] = i; } uwt__global_caml_root_free_pos = t; uwt__global_caml_root_size = GR_ROOT_INIT_SIZE; caml_register_generational_global_root(&uwt__global_caml_root); } else { const cb_t ri = (uwt__global_caml_root_size + (GR_ROOT_INIT_SIZE - 1)) / GR_ROOT_INIT_SIZE; const size_t ar_size = Wosize_val(uwt__global_caml_root); const cb_t nroot_size = uwt__global_caml_root_size + GR_ROOT_INIT_SIZE; if ( uwt__global_caml_root_size > nroot_size ){ caml_failwith("too many lwt threads waiting for i/o"); } if ( ri >= ar_size ){ uint64_t cn_size = ar_size * (uint64_t)(2 * GR_ROOT_INIT_SIZE); if ( cn_size > UINT_MAX ){ cn_size = UINT_MAX; } nroot = caml_alloc(ar_size*2,0); for ( i = 0 ; i < ar_size ; ++i ){ Store_field(nroot,i,Field(uwt__global_caml_root,i)); } for ( i = ar_size ; i < ar_size * 2 ; ++i ){ Field(nroot,i) = Val_unit; } t = realloc(uwt__global_caml_root_free_pos,cn_size * sizeof(*t)); if ( t == NULL ){ caml_raise_out_of_memory(); } caml_modify_generational_global_root(&uwt__global_caml_root,nroot); uwt__global_caml_root_free_pos = t; } nroot = caml_alloc(GR_ROOT_INIT_SIZE,0); cb_t j; for ( i = 0, j = uwt__global_caml_root_size ; i < GR_ROOT_INIT_SIZE ; ++i, ++j ){ Field(nroot,i) = Val_unit; uwt__global_caml_root_free_pos[j] = j; } Store_field(uwt__global_caml_root,ri,nroot); uwt__global_caml_root_size = nroot_size; } CAMLreturn0; }
value v2v_xml_parse_uri (value strv) { CAMLparam1 (strv); CAMLlocal3 (rv, sv, ov); xmlURIPtr uri; uri = xmlParseURI (String_val (strv)); if (uri == NULL) caml_invalid_argument ("parse_uri: unable to parse URI"); rv = caml_alloc_tuple (9); /* field 0: uri_scheme : string option */ if (uri->scheme) { sv = caml_copy_string (uri->scheme); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 0, ov); /* field 1: uri_opaque : string option */ if (uri->opaque) { sv = caml_copy_string (uri->opaque); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 1, ov); /* field 2: uri_authority : string option */ if (uri->authority) { sv = caml_copy_string (uri->authority); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 2, ov); /* field 3: uri_server : string option */ if (uri->server) { sv = caml_copy_string (uri->server); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 3, ov); /* field 4: uri_user : string option */ if (uri->user) { sv = caml_copy_string (uri->user); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 4, ov); /* field 5: uri_port : int */ Store_field (rv, 5, Val_int (uri->port)); /* field 6: uri_path : string option */ if (uri->path) { sv = caml_copy_string (uri->path); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 6, ov); /* field 7: uri_fragment : string option */ if (uri->fragment) { sv = caml_copy_string (uri->fragment); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 7, ov); /* field 8: uri_query_raw : string option */ if (uri->query_raw) { sv = caml_copy_string (uri->query_raw); ov = caml_alloc (1, 0); Store_field (ov, 0, sv); } else ov = Val_int (0); Store_field (rv, 8, ov); xmlFreeURI (uri); CAMLreturn (rv); }
/* Receive one client request into the file-scope [buffer], together with
 * the three file descriptors (stdin/stdout/stderr) passed as SCM_RIGHTS
 * ancillary data.  The payload starts with a 4-byte little-endian total
 * length (decoded via the [unbyte] macro) and must end with '\0'.
 * Returns the number of bytes received, or -1 on error / malformed
 * packet (in which case any received fds are closed).
 * NOTE(review): CMSG_FIRSTHDR can return NULL when no ancillary data
 * arrives; [cm] is dereferenced unconditionally below -- confirm the
 * client always sends the fds in the first message. */
static ssize_t recv_buffer(int fd, int fds[3]) {
  struct iovec iov = { .iov_base = buffer, .iov_len = sizeof(buffer) };
  struct msghdr msg = {
    .msg_iov = &iov, .msg_iovlen = 1,
    .msg_controllen = CMSG_SPACE(3 * sizeof(int)),
  };
  msg.msg_control = alloca(msg.msg_controllen);
  memset(msg.msg_control, 0, msg.msg_controllen);
  ssize_t recvd;
  NO_EINTR(recvd, recvmsg(fd, &msg, 0));
  if (recvd == -1) {
    perror("recvmsg");
    return -1;
  }
  /* keep reading until at least the 4-byte length header is in */
  if (recvd < 4) {
    ssize_t recvd_;
    do {
      NO_EINTR(recvd_, recv(fd, buffer + recvd, sizeof(buffer) - recvd, 0));
      if (recvd_ > 0) recvd += recvd_;
    } while (recvd_ > 0 && recvd < 4);
  }
  size_t target = -1;  /* SIZE_MAX sentinel: forces the malformed check below */
  if (recvd > 4) {
    /* decode the declared total length, then read the rest of the payload */
    target = unbyte(buffer[0],0) | unbyte(buffer[1],1) | unbyte(buffer[2],2) | unbyte(buffer[3],3);
    if (recvd < target) {
      ssize_t recvd_;
      do {
        NO_EINTR(recvd_, recv(fd, buffer + recvd, sizeof(buffer) - recvd, 0));
        if (recvd_ > 0) recvd += recvd_;
      } while (recvd_ > 0 && recvd < target);
    }
  }
  struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
  int *fds0 = (int*)CMSG_DATA(cm);
  int nfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);
  /* Check malformed packet */
  if (nfds != 3 || recvd != target || buffer[recvd-1] != '\0') {
    int i;
    for (i = 0; i < nfds; ++i) close(fds0[i]);
    return -1;
  }
  fds[0] = fds0[0];
  fds[1] = fds0[1];
  fds[2] = fds0[2];
  return recvd;
}

/* ml_merlin_server_setup(path, strfd): parse [strfd] as a numeric fd.
 * Returns [Some (path, fd)] on success; on a non-numeric argument,
 * prints a diagnostic, unlinks [path], and returns None (Val_unit). */
value ml_merlin_server_setup(value path, value strfd)
{
  CAMLparam2(path, strfd);
  CAMLlocal2(payload, ret);
  char *endptr = NULL;
  int fd = strtol(String_val(strfd), &endptr, 0);

  if (endptr && *endptr == '\0') {
    /* (path, fd) */
    payload = caml_alloc(2, 0);
    Store_field(payload, 0, path);
    Store_field(payload, 1, Val_int(fd));

    /* Some payload */
    ret = caml_alloc(1, 0);
    Store_field(ret, 0, payload);
  } else {
    fprintf(stderr, "ml_merlin_server_setup(\"%s\",\"%s\"): invalid argument\n",
            String_val(path), String_val(strfd));
    unlink(String_val(path));

    /* None */
    ret = Val_unit;
  }

  CAMLreturn(ret);
}

/* ml_merlin_server_accept(server, timeout): wait up to [timeout] seconds
 * for a client on the listening socket (field 1 of [server]), then accept
 * it and read its request via recv_buffer.  Returns
 * [Some ((clientfd, stdin, stdout, stderr), args)] where [args] is the
 * array of NUL-separated argument strings starting at byte 4 of the
 * payload, or None on timeout / receive failure. */
value ml_merlin_server_accept(value server, value val_timeout)
{
  CAMLparam2(server, val_timeout);
  CAMLlocal4(ret, client, args, context);
  // Compute timeout
  double timeout = Double_val(val_timeout);
  struct timeval tv;
  tv.tv_sec = timeout;
  tv.tv_usec = (timeout - tv.tv_sec) * 1000000;

  // Select on server
  int serverfd = Int_val(Field(server, 1));
  int selectres;
  fd_set readset;
  /* retry select on EINTR (note: tv may have been updated in place) */
  do {
    FD_ZERO(&readset);
    FD_SET(serverfd, &readset);
    selectres = select(serverfd + 1, &readset, NULL, NULL, &tv);
  } while (selectres == -1 && errno == EINTR);

  int fds[3], clientfd;
  ssize_t len = -1;
  if (selectres > 0) {
    NO_EINTR(clientfd, accept(serverfd, NULL, NULL));
    len = recv_buffer(clientfd, fds);
  }

  if (len == -1) ret = Val_unit; /* None */
  else {
    context = caml_alloc(4, 0);
    /* (clientfd, stdin, stdout, stderr) */
    Store_field(context, 0, Val_int(clientfd));
    Store_field(context, 1, Val_int(fds[0]));
    Store_field(context, 2, Val_int(fds[1]));
    Store_field(context, 3, Val_int(fds[2]));

    /* first pass: count NUL-terminated arguments after the 4-byte header */
    ssize_t i, j;
    int argc = 0;
    for (i = 4; i < len; ++i)
      if (buffer[i] == '\0') argc += 1;

    /* second pass: copy each argument into the OCaml array */
    args = caml_alloc(argc, 0);
    argc = 0;
    for (i = 4, j = 4; i < len; ++i) {
      if (buffer[i] == '\0') {
        Store_field(args, argc, caml_copy_string((const char *)&buffer[j]));
        j = i + 1;
        argc += 1;
      }
    }

    client = caml_alloc(2, 0);
    /* (context, args) */
    Store_field(client, 0, context);
    Store_field(client, 1, args);

    ret = caml_alloc(1, 0);
    /* Some client */
    Store_field(ret, 0, client);
  }

  CAMLreturn(ret);
}