/* iface_addr: project one struct ifaddrs entry into an OCaml option.
   [ifap] is a raw C pointer to a struct ifaddrs smuggled through a value.
   Returns None (Val_int(0)) when the entry has no address or is not AF_INET;
   otherwise Some (addr:int32, netmask:int32, broadcast-or-dst:int32). */
CAMLprim value iface_addr(value ifap) {
  CAMLparam0();
  CAMLlocal2(ret, opt);
  struct ifaddrs *c_ifap = (struct ifaddrs *)ifap;
  /* Interfaces can appear with no address at all (ifa_addr == NULL). */
  if(c_ifap->ifa_addr == NULL) CAMLreturn(Val_int(0));
  uint16_t family = c_ifap->ifa_addr->sa_family;
  if (family != AF_INET)
    opt = Val_int(0); /* None: only IPv4 entries are reported */
  else {
    opt = caml_alloc(1, 0); /* Some _ */
    ret = caml_alloc(3, 0); /* (addr, netmask, broadcast/dst) */
    Store_field(ret, 0, caml_copy_int32(ipv4_of_sockaddr(c_ifap->ifa_addr)));
    /* NOTE(review): ifa_netmask may be NULL for some entries — confirm
       ipv4_of_sockaddr tolerates a NULL sockaddr. */
    Store_field(ret, 1, caml_copy_int32(ipv4_of_sockaddr(c_ifap->ifa_netmask)));
#if defined (__linux__)
    /* On Linux the broadcast and point-to-point destination addresses share
       the ifa_ifu union; IFF_BROADCAST selects which member is valid. */
    Store_field(ret, 2, caml_copy_int32(ipv4_of_sockaddr(c_ifap->ifa_flags & IFF_BROADCAST ? c_ifap->ifa_ifu.ifu_broadaddr : c_ifap->ifa_ifu.ifu_dstaddr )));
#elif defined(__APPLE__) && defined (__MACH__)
    Store_field(ret, 2, caml_copy_int32(ipv4_of_sockaddr(c_ifap->ifa_dstaddr)));
#endif
    Store_field(opt, 0, ret);
  }
  CAMLreturn(opt);
}
/* Convert a libxl_physinfo C struct into the matching OCaml tuple.
   Field layout mirrors the OCaml physinfo type declaration. */
static value Val_physinfo(libxl_physinfo *c_val)
{
	CAMLparam0();
	CAMLlocal2(result, caps);
	int idx;

	/* hw_cap: eight int32 hardware-capability words. */
	caps = caml_alloc_tuple(8);
	for (idx = 0; idx < 8; idx++)
		Store_field(caps, idx, caml_copy_int32(c_val->hw_cap[idx]));

	result = caml_alloc_tuple(11);
	Store_field(result, 0, Val_int(c_val->threads_per_core));
	Store_field(result, 1, Val_int(c_val->cores_per_socket));
	Store_field(result, 2, Val_int(c_val->max_cpu_id));
	Store_field(result, 3, Val_int(c_val->nr_cpus));
	Store_field(result, 4, Val_int(c_val->cpu_khz));
	Store_field(result, 5, caml_copy_int64(c_val->total_pages));
	Store_field(result, 6, caml_copy_int64(c_val->free_pages));
	Store_field(result, 7, caml_copy_int64(c_val->scrub_pages));
	Store_field(result, 8, Val_int(c_val->nr_nodes));
	Store_field(result, 9, caps);
	Store_field(result, 10, caml_copy_int32(c_val->phys_cap));
	CAMLreturn(result);
}
/* Query the PCI info of an NVML device and return it as the tuple
   (busId:string, domain, bus, device, pciDeviceId, pciSubSystemId : int32).
   Raises (via check_error) on any NVML failure. */
CAMLprim value stub_nvml_device_get_pci_info(
    value ml_interface, value ml_device)
{
    CAMLparam2(ml_interface, ml_device);
    CAMLlocal1(result);
    nvmlInterface* iface = (nvmlInterface*)ml_interface;
    nvmlDevice_t dev = *(nvmlDevice_t*)ml_device;
    nvmlPciInfo_t info;
    nvmlReturn_t err;

    err = iface->deviceGetPciInfo(dev, &info);
    check_error(iface, err);

    result = caml_alloc(6, 0);
    Store_field(result, 0, caml_copy_string(info.busId));
    Store_field(result, 1, caml_copy_int32(info.domain));
    Store_field(result, 2, caml_copy_int32(info.bus));
    Store_field(result, 3, caml_copy_int32(info.device));
    Store_field(result, 4, caml_copy_int32(info.pciDeviceId));
    Store_field(result, 5, caml_copy_int32(info.pciSubSystemId));
    CAMLreturn(result);
}
/* Marshal the process-start parameters and invoke the OCaml "proc_start"
   callback (registered on the OCaml side with Callback.register).
   argvp/envp are passed through as raw pointers boxed in nativeints.
   NOTE(review): assumes "proc_start" was registered; caml_named_value
   returns NULL otherwise and the dereference below would crash — confirm. */
void proc_start( const char* logdir, const uint32_t analysis_id, const char* sockname, bool debug_flag, const THREADID tid, char** argvp, int envc, char** envp )
{
    CAMLparam0();
    CAMLlocalN( caml_args, 8 );
    /* Look the closure up once and cache it for subsequent calls. */
    static value *proc_start_closure = NULL;
    if ( !proc_start_closure ) {
        proc_start_closure = caml_named_value( "proc_start" );
    }
    caml_args[0] = caml_copy_string( logdir );
    caml_args[1] = caml_copy_int32( analysis_id );
    caml_args[2] = caml_copy_string( sockname );
    caml_args[3] = Val_bool( debug_flag );
    caml_args[4] = Val_int( tid );
    caml_args[5] = caml_copy_nativeint( (long) argvp ); /* raw pointer, LP64 assumed */
    caml_args[6] = caml_copy_int32( envc );
    caml_args[7] = caml_copy_nativeint( (long) envp ); /* raw pointer, LP64 assumed */
    caml_callbackN( *proc_start_closure, 8, caml_args );
    CAMLreturn0;
}
/* Fetch a domain's runstate info and return it as a 9-tuple:
   0: state (int32), 1: missed_changes (int32),
   2: state_entry_time (int64), 3-8: per-state times (int64). */
CAMLprim value stub_xenctrlext_get_runstate_info(value xch, value domid)
{
	CAMLparam2(xch, domid);
#if defined(XENCTRL_HAS_GET_RUNSTATE_INFO)
	CAMLlocal1(tuple);
	xc_runstate_info_t info;
	int i;

	if (xc_get_runstate_info(_H(xch), _D(domid), &info) < 0)
		failwith_xc(_H(xch));

	tuple = caml_alloc_tuple(9);
	Store_field(tuple, 0, caml_copy_int32(info.state));
	Store_field(tuple, 1, caml_copy_int32(info.missed_changes));
	Store_field(tuple, 2, caml_copy_int64(info.state_entry_time));
	/* Fields 3..8 hold the six time[] entries in order. */
	for (i = 0; i < 6; i++)
		Store_field(tuple, 3 + i, caml_copy_int64(info.time[i]));
	CAMLreturn(tuple);
#else
	caml_failwith("XENCTRL_HAS_GET_RUNSTATE_INFO not defined");
#endif
}
/* Expand a BrlAPI key code into its (type, command, argument, flags)
   components, each boxed as an int32. */
CAMLprim value brlapiml_expandKeyCode(value handle, value camlKeyCode)
{
  CAMLparam2(handle, camlKeyCode);
  CAMLlocal1(result);
  brlapi_expandedKeyCode_t ekc;
  brlapiCheckError(expandKeyCode, Int64_val(camlKeyCode), &ekc);
  result = caml_alloc_tuple(4);
  Store_field(result, 0, caml_copy_int32(ekc.type));
  Store_field(result, 1, caml_copy_int32(ekc.command));
  Store_field(result, 2, caml_copy_int32(ekc.argument));
  /* BUG FIX: flags was previously stored into field 2, clobbering the
     argument and leaving field 3 uninitialized. */
  Store_field(result, 3, caml_copy_int32(ekc.flags));
  CAMLreturn(result);
}
/* Int32.rem implementation.  Raises Division_by_zero on a zero divisor. */
CAMLprim value caml_int32_mod(value v1, value v2)
{
  int32 dividend = Int32_val(v1);
  int32 divisor = Int32_val(v2);
  if (divisor == 0) caml_raise_zero_divide();
  /* PR#4740: on some processors, modulus crashes if division overflows.
     Implement the same behavior as for type "int".
     BUG FIX: the previous guard compared against (1<<31), which is itself
     undefined behavior (left shift into the sign bit of a signed int).
     Since x mod -1 == 0 for every x, testing the divisor alone both covers
     the INT32_MIN % -1 overflow case and stays well-defined. */
  if (divisor == -1) return caml_copy_int32(0);
#ifdef NONSTANDARD_DIV_MOD
  return caml_copy_int32(caml_safe_mod(dividend, divisor));
#else
  return caml_copy_int32(dividend % divisor);
#endif
}
/* Return the eight boot CPU-feature words as a tuple of int32s. */
CAMLprim value stub_xenctrlext_get_boot_cpufeatures(value xch)
{
	CAMLparam1(xch);
#if defined(XENCTRL_HAS_GET_CPUFEATURES)
	CAMLlocal1(tuple);
	uint32_t words[8];
	int i, rc;

	rc = xc_get_boot_cpufeatures(_H(xch), &words[0], &words[1], &words[2],
				     &words[3], &words[4], &words[5],
				     &words[6], &words[7]);
	if (rc < 0)
		failwith_xc(_H(xch));

	tuple = caml_alloc_tuple(8);
	for (i = 0; i < 8; i++)
		Store_field(tuple, i, caml_copy_int32(words[i]));
	CAMLreturn(tuple);
#else
	caml_failwith("XENCTRL_HAS_GET_CPUFEATURES not defined");
#endif
}
/* SFIpAddress.toInteger: return the IPv4 address as a boxed int32. */
CAMLprim value caml_sfIpAddress_toInteger(value addr)
{
    const char *f_name = "SFIpAddress.toInteger";
    return caml_copy_int32(
        sfIpAddress_toInteger(SfIpAddress_val(addr, f_name)));
}
/* Decode one struct inotify_event serialized in [buf] into the tuple
   (wd, flag list, cookie, len).  Each set mask bit becomes the index of
   the matching entry of inotify_return_table, consed onto the list. */
value caml_inotify_convert(value buf)
{
	CAMLparam1(buf);
	CAMLlocal3(tuple, acc, cell);
	struct inotify_event ev;
	int idx;

	acc = Val_emptylist;
	memcpy(&ev, String_val(buf), sizeof(struct inotify_event));

	for (idx = 0; inotify_return_table[idx]; idx++) {
		if (!(ev.mask & inotify_return_table[idx]))
			continue;
		/* caml_alloc_small: fill both fields immediately, no
		   allocation may occur in between. */
		cell = caml_alloc_small(2, Tag_cons);
		Field(cell, 0) = Val_int(idx);
		Field(cell, 1) = acc;
		acc = cell;
	}

	tuple = caml_alloc_tuple(4);
	Store_field(tuple, 0, Val_int(ev.wd));
	Store_field(tuple, 1, acc);
	Store_field(tuple, 2, caml_copy_int32(ev.cookie));
	Store_field(tuple, 3, Val_int(ev.len));
	CAMLreturn(tuple);
}
/* Read the byte at effective address [ea], boxed as int32. */
value IDAOCaml_get_byte(value ea)
{
    CAMLparam1(ea);
    CAMLreturn(caml_copy_int32(wrap_get_byte(Ea_val(ea))));
}
/* Return the current screen effective address, boxed as int32. */
value IDAOCaml_get_screen_ea(value unit)
{
    CAMLparam1(unit);
    CAMLreturn(caml_copy_int32(wrap_get_screen_ea()));
}
/* Compute the zlib CRC-32 of the first [len] bytes of [src]. */
CAMLprim value zlib_crc32( value src, value len )
{
	CAMLparam2(src,len);
	CAMLlocal1(v);
	v = caml_copy_int32(crc32(0L, (Bytef*)(String_val(src)), Int_val(len)));
	CAMLreturn(v);
}
/* Non-blocking recvfrom on a UDP/IPv4 socket.
   Result is built with the project's Val_OK / Val_Err / Val_WouldBlock
   macros: OK (src_ip:int32, src_port:int, nread:int) | Err msg | WouldBlock.
   NOTE(review): String_val(v_str) is taken before recvfrom; no OCaml
   allocation happens in between, so the pointer cannot move. */
CAMLprim value caml_udpv4_recvfrom(value v_fd, value v_str, value v_off, value v_len, value v_src)
{
  CAMLparam5(v_fd, v_str, v_off, v_len, v_src);
  CAMLlocal3(v_ret, v_err, v_inf);
  unsigned char *buf = String_val(v_str) + Int_val(v_off);
  size_t len = Int_val(v_len);
  int fd = Int_val(v_fd);
  struct sockaddr_in sa;
  socklen_t sa_len = sizeof(sa);
  /* MSG_DONTWAIT: never block even if the fd is in blocking mode. */
  int r = recvfrom(fd, (void *)buf, len, MSG_DONTWAIT, (struct sockaddr *)&sa, &sa_len);
  if (r < 0) {
    if (errno == EAGAIN || errno==EWOULDBLOCK)
      Val_WouldBlock(v_ret);
    else {
      v_err = caml_copy_string(strerror(errno));
      Val_Err(v_ret, v_err);
    }
  } else {
    /* (src_ip, src_port, bytes_read) — address converted to host order. */
    v_inf = caml_alloc_tuple(3);
    Store_field(v_inf, 0, caml_copy_int32(ntohl(sa.sin_addr.s_addr)));
    Store_field(v_inf, 1, Val_int(ntohs(sa.sin_port)));
    Store_field(v_inf, 2, Val_int(r));
    Val_OK(v_ret, v_inf);
  }
  CAMLreturn(v_ret);
}
/* Enumerate interfaces via SIOCGIFCONF, returning an OCaml list of
   (ifname:string, ipaddr:int32) pairs (network byte order, as returned). */
CAMLprim value siocgifconf_c(value caml_socket)
{
  CAMLparam1(caml_socket);
  /* BUG FIX: caml_iflist/new_if/pair previously held heap values across
     allocations without being registered as GC roots; a compaction or
     minor collection could leave them dangling. */
  CAMLlocal3(caml_iflist, new_if, pair);
  int socket = Int_val(caml_socket);
  struct ifreq ifr[IFCONF_MAXLEN];
  struct ifconf ifc;
  int i, nentries;
  struct sockaddr_in *sin;

  caml_iflist = Val_int(0); /* [] */
  /* BUG FIX: ifc_len is a byte count, not an entry count (netdevice(7)).
     The kernel also returns the number of bytes used in ifc_len. */
  ifc.ifc_len = sizeof(ifr);
  ifc.ifc_req = ifr;
  FI(socket, SIOCGIFCONF, &ifc);
  nentries = ifc.ifc_len / sizeof(struct ifreq);
  for (i = 0; i < nentries; i++) {
    /* ifname:string * ipaddr:int32 */
    pair = caml_alloc(2, 0);
    Store_field(pair, 0, caml_copy_string(ifc.ifc_req[i].ifr_name));
    sin = (struct sockaddr_in *)&ifc.ifc_req[i].ifr_addr;
    Store_field(pair, 1, caml_copy_int32(sin->sin_addr.s_addr));
    /* cons onto the accumulated list */
    new_if = caml_alloc(2, 0);
    Store_field(new_if, 0, pair);
    Store_field(new_if, 1, caml_iflist);
    caml_iflist = new_if;
  }
  RESULT(caml_iflist, 0);
}
/* accept(2) on a TCP/IPv4 listening socket.
   Result via project macros: OK (fd, peer_ip:int32, peer_port) | Err msg |
   WouldBlock.  The accepted fd is switched to non-blocking mode. */
CAMLprim value caml_tcpv4_accept(value v_fd)
{
  CAMLparam1(v_fd);
  CAMLlocal4(v_ret,v_err,v_ca,v_ip);
  int r, fd=Int_val(v_fd);
  struct sockaddr_in sa;
  socklen_t len = sizeof sa;
  r = accept(fd, (struct sockaddr *)&sa, &len);
  if (r < 0) {
    if (errno == EWOULDBLOCK || errno == EAGAIN)
      Val_WouldBlock(v_ret);
    else {
      v_err = caml_copy_string(strerror(errno));
      Val_Err(v_ret, v_err);
    }
  } else {
    setnonblock(r);
    /* Box the peer address before allocating the result tuple so every
       intermediate value lives in a registered root. */
    v_ip = caml_copy_int32(ntohl(sa.sin_addr.s_addr));
    v_ca = caml_alloc(3,0);
    Store_field(v_ca, 0, Val_int(r));
    Store_field(v_ca, 1, v_ip);
    Store_field(v_ca, 2, Val_int(ntohs(sa.sin_port)));
    Val_OK(v_ret, v_ca);
  }
  CAMLreturn(v_ret);
}
/* Decode a serialized struct inotify_event into (wd, flags, cookie, len),
   where flags is the list of indices of set bits per inotify_return_table. */
value stub_inotify_convert(value buf)
{
	CAMLparam1(buf);
	CAMLlocal3(event, flags, cons);
	struct inotify_event ev;
	int i;

	flags = Val_emptylist;
	memcpy(&ev, String_val(buf), sizeof(struct inotify_event));

	for (i = 0; inotify_return_table[i]; i++) {
		if (ev.mask & inotify_return_table[i]) {
			/* caml_alloc initializes the cell, so Store_field
			   ordering is unconstrained here. */
			cons = caml_alloc(2, Tag_cons);
			Store_field(cons, 0, Val_int(i));
			Store_field(cons, 1, flags);
			flags = cons;
		}
	}

	event = caml_alloc_tuple(4);
	Store_field(event, 0, Val_int(ev.wd));
	Store_field(event, 1, flags);
	Store_field(event, 2, caml_copy_int32(ev.cookie));
	Store_field(event, 3, Val_int(ev.len));
	CAMLreturn(event);
}
/* Fetch the 8-bit pixel at (x, y) from an SDL surface, boxed as int32.
   No bounds checking is performed on x/y. */
CAMLprim value caml_SDL_SurfaceGetPixel8(value surface, value x, value y)
{
    Uint8 *pix = (Uint8 *) SDL_Surface_val(surface)->pixels;
    int stride = SDL_Surface_val(surface)->w;
    int index = Long_val(y) * stride + Long_val(x);
    return caml_copy_int32(pix[index]);
}
void proc_end( unsigned int bbl_cnt ) { CAMLparam0(); value *proc_end_closure = caml_named_value( "proc_end" ); caml_callback( *proc_end_closure, caml_copy_int32( bbl_cnt ) ); CAMLreturn0; }
/* Extract the final CRC-32 digest from the custom block, boxed as int32. */
value ml_crc32_final(value custom)
{
	CAMLparam1 (custom);
	CAMLlocal1 (digest);
	digest = caml_copy_int32(CrcGetDigest(crc32_custom_val(custom)));
	CAMLreturn (digest);
}
/* Wrap a C int32 into a PARNUM_INT32 OCaml block and build the paranode. */
paranode mk_int32_paranode(int32_t i, source_info_t *src_info)
{
  CAMLparam0();
  CAMLlocal1(num);
  num = caml_alloc(1, PARNUM_INT32);
  Store_field(num, 0, caml_copy_int32(i));
  CAMLreturnT(paranode, mk_num(num, src_info));
}
/* OR together the epoll event flags named by the OCaml array [caml],
   mapping each variant through caml_epoll_events, boxed as int32. */
inline value caml_to_c_epoll_event_flags(value caml)
{
    int flags = 0;
    int count = Wosize_val(caml);
    int i;
    for (i = 0; i < count; i++)
        flags |= caml_epoll_events[Int_val(Field(caml, i))];
    return caml_copy_int32(flags);
}
/* Generic Bigarray element read: fetch the element of [vb] addressed by the
   [nind] indices in [vind], boxing it according to the array's kind.
   caml_ba_offset performs the bounds check and may raise. */
value caml_ba_get_N(value vb, value * vind, int nind)
{
  struct caml_ba_array * b = Caml_ba_array_val(vb);
  intnat index[CAML_BA_MAX_NUM_DIMS];
  int i;
  intnat offset;

  /* Check number of indices = number of dimensions of array
     (maybe not necessary if ML typing guarantees this) */
  if (nind != b->num_dims)
    caml_invalid_argument("Bigarray.get: wrong number of indices");
  /* Compute offset and check bounds */
  for (i = 0; i < b->num_dims; i++) index[i] = Long_val(vind[i]);
  offset = caml_ba_offset(b, index);
  /* Perform read: box the raw element per the array kind. */
  switch ((b->flags) & CAML_BA_KIND_MASK) {
  default:
    Assert(0);
#ifdef _KERNEL
#else
  /* Floating-point kinds are compiled out of kernel builds. */
  case CAML_BA_FLOAT32:
    return caml_copy_double(((float *) b->data)[offset]);
  case CAML_BA_FLOAT64:
    return caml_copy_double(((double *) b->data)[offset]);
#endif
  case CAML_BA_SINT8:
    return Val_int(((int8 *) b->data)[offset]);
  case CAML_BA_UINT8:
    return Val_int(((uint8 *) b->data)[offset]);
  case CAML_BA_SINT16:
    return Val_int(((int16 *) b->data)[offset]);
  case CAML_BA_UINT16:
    return Val_int(((uint16 *) b->data)[offset]);
  case CAML_BA_INT32:
    return caml_copy_int32(((int32 *) b->data)[offset]);
  case CAML_BA_INT64:
    return caml_copy_int64(((int64 *) b->data)[offset]);
  case CAML_BA_NATIVE_INT:
    return caml_copy_nativeint(((intnat *) b->data)[offset]);
  case CAML_BA_CAML_INT:
    return Val_long(((intnat *) b->data)[offset]);
#ifdef _KERNEL
#else
  /* Complex kinds: element is a (re, im) pair at offset*2. */
  case CAML_BA_COMPLEX32:
    { float * p = ((float *) b->data) + offset * 2;
      return copy_two_doubles(p[0], p[1]); }
  case CAML_BA_COMPLEX64:
    { double * p = ((double *) b->data) + offset * 2;
      return copy_two_doubles(p[0], p[1]); }
#endif
  }
}
/* Return Some (function start ea, boxed int32) containing [ea], or None. */
value IDAOCaml_find_func_begin(value ea)
{
    CAMLparam1(ea);
    CAMLlocal2(retval, boxed);
    ea_t fb;
    fb = wrap_find_func_begin(Ea_val(ea));
    if(fb != -1)
    {
        /* BUG FIX: the int32 must be allocated BEFORE the Some cell.
           caml_alloc_small does not initialize its fields, and the previous
           order let caml_copy_int32 trigger a GC that could scan the
           uninitialized field. */
        boxed = caml_copy_int32(fb);
        retval = caml_alloc_small(1, 0);
        Field(retval, 0) = boxed; /* Some */
    }
    else
        retval = Val_int(0); /* None */
    CAMLreturn(retval);
}
/* Raises Brlapi_exception with (err, packet type, raw packet bytes).
   Does not return: caml_raise unwinds via the OCaml exception machinery. */
static void BRLAPI_STDCALL raise_brlapi_exception(int err, brlapi_packetType_t type, const void *packet, size_t size)
{
  static value *exception = NULL;
  size_t i; /* size_t: avoid signed/unsigned comparison with size */
  /* BUG FIX: CAMLlocal* requires a preceding CAMLparam* to set up the
     local-roots frame; it was missing here. */
  CAMLparam0();
  CAMLlocal2(str, res);
  str = caml_alloc_string(size);
  for (i=0; i<size; i++) Byte(str, i) = ((char *) packet)[i];
  if (exception==NULL) exception = caml_named_value("Brlapi_exception");
  /* NOTE(review): assumes "Brlapi_exception" was registered on the OCaml
     side; *exception dereferences NULL otherwise. */
  res = caml_alloc (4, 0);
  Store_field(res, 0, *exception);
  Store_field(res, 1, Val_int(err));
  Store_field(res, 2, caml_copy_int32(type));
  Store_field(res, 3, str);
  caml_raise(res); /* no CAMLreturn: caml_raise never returns */
}
/* Execute CPUID with EAX = leaf, ECX = word and return the resulting
   (eax, ebx, ecx, edx) registers as a tuple of int32s. */
CAMLprim value do_cpuid(value leaf, value word)
{
    int32_t eax, ebx, ecx, edx, tmp;
    CAMLparam2(leaf, word);
    CAMLlocal1(rv);
    eax = Int32_val(leaf);
    ecx = Int32_val(word);
    /* Execute CPUID; the MOVs are because ocamlc uses -fPIC and
     * 32-bit gcc won't let you just use "=b" to get at %ebx in PIC
     * (%ebx holds the GOT pointer), so %ebx is saved/restored via tmp. */
    asm("mov %%ebx, %4 ; cpuid ; mov %%ebx, %1 ; mov %4, %%ebx "
        : "+a" (eax), "=r" (ebx), "+c" (ecx), "=d" (edx), "=r" (tmp));
    /* Wrap the return value up as an OCaml tuple */
    rv = caml_alloc_tuple(4);
    Store_field(rv, 0, caml_copy_int32(eax));
    Store_field(rv, 1, caml_copy_int32(ebx));
    Store_field(rv, 2, caml_copy_int32(ecx));
    Store_field(rv, 3, caml_copy_int32(edx));
    CAMLreturn(rv);
}
/* Share [count] pages with domain [domid] via gntshr.
   Returns (grant-ref int32 list, mapped pages as a bigarray). */
CAMLprim value stub_xc_gntshr_share_pages(value xgh, value domid, value count, value writeable)
{
	CAMLparam4(xgh, domid, count, writeable);
	CAMLlocal4(result, ml_refs, ml_refs_cons, ml_map);
#ifdef HAVE_GNTSHR
	void *map;
	uint32_t *refs;
	uint32_t c_domid;
	int c_count;
	int i;
	c_count = Int_val(count);
	c_domid = Int32_val(domid);
	result = caml_alloc(2, 0);
	refs = (uint32_t *) malloc(c_count * sizeof(uint32_t));
	/* BUG FIX: malloc was unchecked; a NULL refs buffer would have been
	   handed straight to xc_gntshr_share_pages. */
	if(NULL == refs)
		caml_raise_out_of_memory();
	map = xc_gntshr_share_pages(_G(xgh), c_domid, c_count, refs, Bool_val(writeable));
	if(NULL == map) {
		free(refs);
		failwith_xc(_G(xgh));
	}
	// Construct the list of grant references; building back-to-front
	// keeps the list in refs[0..n-1] order.
	ml_refs = Val_emptylist;
	for(i = c_count - 1; i >= 0; i--) {
		ml_refs_cons = caml_alloc(2, 0);
		Store_field(ml_refs_cons, 0, caml_copy_int32(refs[i]));
		Store_field(ml_refs_cons, 1, ml_refs);
		ml_refs = ml_refs_cons;
	}
	ml_map = caml_ba_alloc_dims(XC_GNTTAB_BIGARRAY, 1, map, c_count << XC_PAGE_SHIFT);
	Store_field(result, 0, ml_refs);
	Store_field(result, 1, ml_map);
	free(refs);
#else
	gntshr_missing();
#endif
	CAMLreturn(result);
}
/* epoll_wait wrapper: returns a tuple (used as an array) of
   (fd:int, events:int32) pairs, one per ready descriptor. */
CAMLprim value caml_epoll_wait(value epfd, value maxevents, value timeout)
{
    CAMLparam3(epfd, maxevents, timeout);
    CAMLlocal3(result, pair, evword);
    int nmax = Int_val(maxevents);
    struct epoll_event evbuf[nmax]; // no check of maxevents > 0
    int ready = epoll_wait(Int_val(epfd), evbuf, nmax, Int_val(timeout));
    int k;
    if (ready == -1)
        uerror("epoll_wait", Nothing);
    result = caml_alloc_tuple(ready);
    for (k = 0; k < ready; k++) {
        /* Use caml_alloc + Store_field for the pair: unlike
           caml_alloc_small, this imposes no ordering constraint against
           the int32 allocation. */
        evword = caml_copy_int32(evbuf[k].events);
        pair = caml_alloc(2, 0);
        Store_field(pair, 0, Val_int(evbuf[k].data.fd));
        Store_field(pair, 1, evword);
        Store_field(result, k, pair);
    }
    CAMLreturn(result);
}
int bbl_instrument( unsigned long addr, const bbl_content* content, const reg_context* context, const THREADID tid ) { CAMLparam0(); CAMLlocal1( ret ); CAMLlocalN( caml_args, 5 ); unsigned i, j; uint32_t size = (uint32_t) content->size; static value *bbl_instrument_closure = NULL; if ( !bbl_instrument_closure ) { bbl_instrument_closure = caml_named_value( "bbl_instrument" ); } caml_args[0] = caml_copy_nativeint( addr ); caml_args[1] = caml_copy_int32( size ); caml_args[2] = caml_alloc_string( size ); memcpy( (unsigned char*)String_val(caml_args[2]), content->content, size ); caml_args[3] = caml_alloc_tuple( 45 ); for ( i = 0; i < 20; ++i ) { Store_field( caml_args[3], i, caml_copy_nativeint( ((long*) &context->eax)[i] ) ); } for ( i = 20; i < 29; ++i ) { Store_field( caml_args[3], i, Val_bool( ((long*) &context->eax)[i] ) ); } for ( i = 29, j = 0; i < 45; ++i ) { Store_field( caml_args[3], i, caml_copy_int64( ((uint64_t*) &context->xmm0)[j++] ) ); Store_field( caml_args[3], i, caml_copy_int64( ((uint64_t*) &context->xmm0)[j++] ) ); } caml_args[4] = Val_int( tid ); ret = caml_callbackN( *bbl_instrument_closure, 5, caml_args ); CAMLreturnT( int, Int_val(ret) ); }
/* read : 'a prim -> offset:int -> raw_pointer -> 'a
   Dereference [buffer_ + offset_] as the C type selected by [prim_] and box
   the result as the corresponding OCaml value. */
value ctypes_read(value prim_, value offset_, value buffer_)
{
  CAMLparam3(prim_, offset_, buffer_);
  CAMLlocal1(b);
  int offset = Int_val(offset_);
  void *buf = (char *)CTYPES_TO_PTR(buffer_) + offset;
  switch (Int_val(prim_))
  {
   case Char: b = Val_int(*(char *)buf); break;
   case Schar: b = Val_int(*(signed char *)buf); break;
   case Uchar: b = ctypes_copy_uint8(*(unsigned char *)buf); break;
   case Short: b = Val_int(*(short *)buf); break;
   case Int: b = Val_int(*(int *)buf); break;
   case Long: b = ctypes_copy_long(*(long *)buf); break;
   case Llong: b = ctypes_copy_llong(*(long long *)buf); break;
   case Ushort: b = ctypes_copy_ushort(*(unsigned short *)buf); break;
   case Uint: b = ctypes_copy_uint(*(unsigned int *)buf); break;
   case Ulong: b = ctypes_copy_ulong(*(unsigned long *)buf); break;
   case Ullong: b = ctypes_copy_ullong(*(unsigned long long *)buf); break;
   case Size_t: b = ctypes_copy_size_t(*(size_t *)buf); break;
   case Int8_t: b = Val_int(*(int8_t *)buf); break;
   case Int16_t: b = Val_int(*(int16_t *)buf); break;
   case Int32_t: b = caml_copy_int32(*(int32_t *)buf); break;
   case Int64_t: b = caml_copy_int64(*(int64_t *)buf); break;
   case Uint8_t: b = ctypes_copy_uint8(*(uint8_t *)buf); break;
   case Uint16_t: b = ctypes_copy_uint16(*(uint16_t *)buf); break;
   case Uint32_t: b = ctypes_copy_uint32(*(uint32_t *)buf); break;
   /* BUG FIX: was "*(uint64 *)buf" — uint64_t is the <stdint.h> name used
      by every other fixed-width case. */
   case Uint64_t: b = ctypes_copy_uint64(*(uint64_t *)buf); break;
   case Camlint: b = Val_int(*(intnat *)buf); break;
   case Nativeint: b = caml_copy_nativeint(*(intnat *)buf); break;
   case Float: b = caml_copy_double(*(float *)buf); break;
   case Double: b = caml_copy_double(*(double *)buf); break;
   case Complex32: b = ctypes_copy_float_complex(*(float complex *)buf); break;
   case Complex64: b = ctypes_copy_double_complex(*(double complex *)buf); break;
   default: assert(0);
  }
  CAMLreturn(b);
}