/* val get_runstate_info : handle -> domid -> runstate_info
 *
 * Fetch the hypervisor's runstate accounting for a domain and return it
 * as a 9-tuple:
 *   field 0    : state            (int32)
 *   field 1    : missed_changes   (int32)
 *   field 2    : state_entry_time (int64)
 *   fields 3-8 : accumulated per-state times (int64)
 * Raises if the hypercall fails or the stub was built without
 * XENCTRL_HAS_GET_RUNSTATE_INFO.
 */
CAMLprim value stub_xenctrlext_get_runstate_info(value xch, value domid)
{
    CAMLparam2(xch, domid);
#if defined(XENCTRL_HAS_GET_RUNSTATE_INFO)
    CAMLlocal1(result);
    xc_runstate_info_t info;
    int i;

    if (xc_get_runstate_info(_H(xch), _D(domid), &info) < 0)
        failwith_xc(_H(xch));

    result = caml_alloc_tuple(9);
    Store_field(result, 0, caml_copy_int32(info.state));
    Store_field(result, 1, caml_copy_int32(info.missed_changes));
    Store_field(result, 2, caml_copy_int64(info.state_entry_time));
    /* The six per-runstate time counters fill fields 3..8. */
    for (i = 0; i < 6; i++)
        Store_field(result, 3 + i, caml_copy_int64(info.time[i]));

    CAMLreturn(result);
#else
    caml_failwith("XENCTRL_HAS_GET_RUNSTATE_INFO not defined");
#endif
}
/* val get_boot_cpufeatures : handle -> (int32 * ... * int32)
 *
 * Query the eight boot-time CPU feature words and return them to OCaml
 * as an 8-tuple of int32.  Raises on hypercall failure, or if built
 * without XENCTRL_HAS_GET_CPUFEATURES.
 */
CAMLprim value stub_xenctrlext_get_boot_cpufeatures(value xch)
{
    CAMLparam1(xch);
#if defined(XENCTRL_HAS_GET_CPUFEATURES)
    CAMLlocal1(v);
    uint32_t feat[8];
    int i;

    if (xc_get_boot_cpufeatures(_H(xch),
                                &feat[0], &feat[1], &feat[2], &feat[3],
                                &feat[4], &feat[5], &feat[6], &feat[7]) < 0)
        failwith_xc(_H(xch));

    v = caml_alloc_tuple(8);
    for (i = 0; i < 8; i++)
        Store_field(v, i, caml_copy_int32(feat[i]));

    CAMLreturn(v);
#else
    caml_failwith("XENCTRL_HAS_GET_CPUFEATURES not defined");
#endif
}
/* val domain_set_timer_mode : handle -> domid -> int -> unit
 *
 * Set the virtual timer mode for a domain; raises on failure.
 */
CAMLprim value stub_xenctrlext_domain_set_timer_mode(value xch, value id, value mode)
{
    CAMLparam3(xch, id, mode);
    int rc = xcext_domain_set_timer_mode(_H(xch), _D(id), Int_val(mode));

    if (rc < 0)
        failwith_xc(_H(xch));
    CAMLreturn(Val_unit);
}
/* val domain_suppress_spurious_page_faults : handle -> domid -> unit
 *
 * Ask Xen to suppress spurious page faults for the domain; raises on
 * failure.
 */
CAMLprim value stub_xenctrlext_domain_suppress_spurious_page_faults(value xch, value domid)
{
    CAMLparam2(xch, domid);

    if (xc_domain_suppress_spurious_page_faults(_H(xch), _D(domid)))
        failwith_xc(_H(xch));
    CAMLreturn(Val_unit);
}
/* val domain_set_target : handle -> domid -> domid -> unit
 *
 * Set [target] as the target domain of [domid] (grants/privilege
 * delegation); raises on failure.
 */
CAMLprim value stub_xenctrlext_domain_set_target(value xch, value domid, value target)
{
    CAMLparam3(xch, domid, target);

    if (xc_domain_set_target(_H(xch), _D(domid), _D(target)))
        failwith_xc(_H(xch));
    CAMLreturn(Val_unit);
}
/* val domain_get_acpi_s_state : handle -> domid -> int
 *
 * Read the HVM_PARAM_ACPI_S_STATE parameter of an HVM domain and return
 * it as an OCaml int; raises on failure.
 */
CAMLprim value stub_xenctrlext_domain_get_acpi_s_state(value xch, value domid)
{
    CAMLparam2(xch, domid);
    unsigned long s_state = 0;

    if (xc_get_hvm_param(_H(xch), _D(domid), HVM_PARAM_ACPI_S_STATE, &s_state) != 0)
        failwith_xc(_H(xch));

    CAMLreturn(Val_int(s_state));
}
/* val get_max_nr_cpus : handle -> int
 *
 * Query the host physinfo and return the number of possible CPUs
 * (max_cpu_id + 1); raises on hypercall failure.
 */
CAMLprim value stub_xenctrlext_get_max_nr_cpus(value xch)
{
    CAMLparam1(xch);
    xc_physinfo_t c_physinfo;
    /* Extract the C handle from the OCaml value *before* releasing the
     * runtime lock: once caml_enter_blocking_section() is called the GC
     * may run on another thread and move heap blocks, so OCaml values
     * (including [xch]) must not be touched inside the blocking section.
     * NOTE(review): assumes _H() yields the xc_interface pointer, as in
     * the other stubs here — confirm against the bindings' header. */
    xc_interface *xch_c = _H(xch);
    int r;

    caml_enter_blocking_section();
    r = xc_physinfo(xch_c, &c_physinfo);
    caml_leave_blocking_section();

    if (r)
        failwith_xc(xch_c);

    CAMLreturn(Val_int(c_physinfo.max_cpu_id + 1));
}
/* val gntshr_open : unit -> gntshr_handle
 *
 * Open a libxc grant-sharing handle and hand it to OCaml.
 * Raises on failure, or (via gntshr_missing) when built without
 * HAVE_GNTSHR.
 */
CAMLprim value stub_xc_gntshr_open(void)
{
    CAMLparam0();
    CAMLlocal1(result);
#ifdef HAVE_GNTSHR
    xc_gntshr *xgh;
    xgh = xc_gntshr_open(NULL, 0);
    if (NULL == xgh)
        failwith_xc(NULL);
    /* The raw xc_gntshr pointer is smuggled into OCaml as an opaque
     * "value"; sibling stubs recover it with _G().  NOTE(review): this
     * out-of-heap-pointer-as-value trick is only safe as long as the
     * runtime tolerates naked pointers — confirm against the OCaml
     * version in use. */
    result = (value)xgh;
#else
    /* gntshr_missing() raises and does not return, so [result] keeps the
     * Val_unit that CAMLlocal1 initialised it to and the CAMLreturn
     * below is unreachable on this path (but keeps the stub
     * well-formed). */
    gntshr_missing();
#endif
    CAMLreturn(result);
}
/* val gntshr_munmap : gntshr_handle -> share -> unit
 *
 * Unmap the pages of a share (field 1 of [share] is the backing
 * bigarray); raises on failure.
 */
CAMLprim value stub_xc_gntshr_munmap(value xgh, value share)
{
    CAMLparam2(xgh, share);
    CAMLlocal1(ml_map);
#ifdef HAVE_GNTSHR
    ml_map = Field(share, 1);
    int len = Caml_ba_array_val(ml_map)->dim[0];
    int nr_pages = len >> XC_PAGE_SHIFT;

    if (xc_gntshr_munmap(_G(xgh), Caml_ba_data_val(ml_map), nr_pages) != 0)
        failwith_xc(_G(xgh));
#else
    gntshr_missing();
#endif
    CAMLreturn(Val_unit);
}
/* val gntshr_share_pages : gntshr_handle -> int32 -> int -> bool
 *                          -> int32 list * bigarray
 *
 * Share [count] fresh pages with domain [domid].  Returns the list of
 * grant references paired with a bigarray mapping of the shared pages.
 * Raises on allocation or sharing failure.
 */
CAMLprim value stub_xc_gntshr_share_pages(value xgh, value domid, value count, value writeable)
{
    CAMLparam4(xgh, domid, count, writeable);
    CAMLlocal4(result, ml_refs, ml_refs_cons, ml_map);
#ifdef HAVE_GNTSHR
    void *map;
    uint32_t *refs;
    uint32_t c_domid;
    int c_count;
    int i;

    c_count = Int_val(count);
    c_domid = Int32_val(domid);
    result = caml_alloc(2, 0);

    refs = malloc(c_count * sizeof *refs);
    /* Unchecked malloc would hand a NULL refs array to libxc below. */
    if (NULL == refs)
        caml_raise_out_of_memory();

    map = xc_gntshr_share_pages(_G(xgh), c_domid, c_count, refs,
                                Bool_val(writeable));
    if (NULL == map) {
        free(refs);
        failwith_xc(_G(xgh));
    }

    /* Construct the OCaml list of grant references, last ref first so
     * the list order matches the array order. */
    ml_refs = Val_emptylist;
    for (i = c_count - 1; i >= 0; i--) {
        ml_refs_cons = caml_alloc(2, 0);
        Store_field(ml_refs_cons, 0, caml_copy_int32(refs[i]));
        Store_field(ml_refs_cons, 1, ml_refs);
        ml_refs = ml_refs_cons;
    }

    ml_map = caml_ba_alloc_dims(XC_GNTTAB_BIGARRAY, 1, map,
                                c_count << XC_PAGE_SHIFT);

    Store_field(result, 0, ml_refs);
    Store_field(result, 1, ml_map);

    free(refs);
#else
    gntshr_missing();
#endif
    CAMLreturn(result);
}
/* val gntshr_munmap_batched : gntshr_handle -> share -> unit
 *
 * Unmap the pages of a batched share (field 1 of [share] is the backing
 * bigarray); raises on failure.
 *
 * Uses the modern Caml_ba_* bigarray accessors, consistent with
 * stub_xc_gntshr_munmap; the old Bigarray_val/Data_bigarray_val aliases
 * are deprecated compatibility macros removed from newer OCaml.
 */
CAMLprim value stub_gntshr_munmap_batched(value xgh, value share)
{
    CAMLparam2(xgh, share);
    CAMLlocal1(ml_map);
#ifdef HAVE_GNTSHR
    ml_map = Field(share, 1);
    int size = Caml_ba_array_val(ml_map)->dim[0];
    int pages = size >> XC_PAGE_SHIFT;
#ifdef linux
    /* Bug in xen-4.4 libxc xc_linux_osdep implementation, work-around by
       using the kernel interface directly. */
    int result = munmap(Caml_ba_data_val(ml_map), size);
#else
    int result = xc_gntshr_munmap(_G(xgh), Caml_ba_data_val(ml_map), pages);
#endif
    if (result != 0)
        failwith_xc(_G(xgh));
#else
    gntshr_missing();
#endif
    CAMLreturn(Val_unit);
}