Пример #1
0
/*
 * OCaml stub: build an HVM domain.
 *
 * Returns a (store_mfn, console_mfn) nativeint pair.  Raises (via
 * failwith_oss_xc) on any libxc failure.
 *
 * Fix: removed the unused `char *error[256];` local — it was never
 * referenced, and was (presumably accidentally) declared as an array of
 * 256 pointers rather than a 256-char buffer.
 */
CAMLprim value stub_xc_hvm_build_native(value xc_handle, value domid,
                                        value mem_max_mib, value mem_start_mib, value image_name, value store_evtchn, value console_evtchn)
{
    CAMLparam5(xc_handle, domid, mem_max_mib, mem_start_mib, image_name);
    CAMLxparam2(store_evtchn, console_evtchn);
    CAMLlocal1(result);

    /* Copy the path to C memory: the OCaml string may move once we drop
     * the runtime lock below.  NOTE(review): strdup result is not
     * NULL-checked here (matches surrounding code) — confirm policy. */
    char *image_name_c = strdup(String_val(image_name));
    xc_interface *xch;

    unsigned long store_mfn=0;
    unsigned long console_mfn=0;
    int r;
    struct flags f;
    /* The xenguest interface changed and was backported to XCP: */
#if defined(XENGUEST_HAS_HVM_BUILD_ARGS) || (__XEN_LATEST_INTERFACE_VERSION__ >= 0x00040200)
    struct xc_hvm_build_args args;
#endif
    get_flags(&f, _D(domid));

    xch = _H(xc_handle);
    configure_vcpus(xch, _D(domid), f);
    configure_tsc(xch, _D(domid), f);

#if defined(XENGUEST_HAS_HVM_BUILD_ARGS) || (__XEN_LATEST_INTERFACE_VERSION__ >= 0x00040200)
    /* MiB -> bytes; widen before shifting to avoid int overflow. */
    args.mem_size = (uint64_t)Int_val(mem_max_mib) << 20;
    args.mem_target = (uint64_t)Int_val(mem_start_mib) << 20;
    args.mmio_size = f.mmio_size_mib << 20;
    args.image_file_name = image_name_c;
#endif

    /* Drop the OCaml runtime lock around the (slow) domain build. */
    caml_enter_blocking_section ();
#if defined(XENGUEST_HAS_HVM_BUILD_ARGS) || (__XEN_LATEST_INTERFACE_VERSION__ >= 0x00040200)
    r = xc_hvm_build(xch, _D(domid), &args);
#else
    r = xc_hvm_build_target_mem(xch, _D(domid),
                                Int_val(mem_max_mib),
                                Int_val(mem_start_mib),
                                image_name_c);
#endif
    caml_leave_blocking_section ();

    free(image_name_c);

    if (r)
        failwith_oss_xc(xch, "hvm_build");


    r = hvm_build_set_params(xch, _D(domid), Int_val(store_evtchn), &store_mfn,
                             Int_val(console_evtchn), &console_mfn, f);
    if (r)
        failwith_oss_xc(xch, "hvm_build_params");

    result = caml_alloc_tuple(2);
    Store_field(result, 0, caml_copy_nativeint(store_mfn));
    Store_field(result, 1, caml_copy_nativeint(console_mfn));

    CAMLreturn(result);
}
/*
 * Build a PV (Linux) domain.  Uses the file-scope xch/domid globals.
 *
 * Outputs: *store_mfn, *console_mfn, and the native protocol name in
 * `protocol` (caller-supplied buffer, assumed >= 64 bytes).
 * Returns 0; raises via failwith_oss_xc on any libxc failure.
 *
 * NOTE(review): c_mem_max_mib is unused here — confirm callers expect
 * the max to be enforced elsewhere.
 *
 * Fix: the protocol string was copied with strncpy(dst, src, 64), which
 * neither guarantees NUL termination nor tolerates a NULL source;
 * replaced with a guarded snprintf.
 */
int stub_xc_linux_build(int c_mem_max_mib, int mem_start_mib,
                        const char *image_name, const char *ramdisk_name,
                        const char *cmdline, const char *features,
                        int flags, int store_evtchn, int store_domid,
                        int console_evtchn, int console_domid,
                        unsigned long *store_mfn, unsigned long *console_mfn,
                        char *protocol)
{
    int r;
    struct xc_dom_image *dom;
    const char *native_protocol;

    struct flags f;
    get_flags(&f);

    xc_dom_loginit(xch);
    dom = xc_dom_allocate(xch, cmdline, features);
    if (!dom)
        failwith_oss_xc("xc_dom_allocate");

    /* The default image size limits are too large. */
    xc_dom_kernel_max_size(dom, get_image_max_size("kernel"));
    xc_dom_ramdisk_max_size(dom, get_image_max_size("ramdisk"));

    configure_vcpus(f);
    configure_tsc(f);

    r = xc_dom_linux_build(xch, dom, domid, mem_start_mib,
                           image_name, ramdisk_name, flags,
                           store_evtchn, store_mfn,
                           console_evtchn, console_mfn);
    if ( r )
        failwith_oss_xc("xc_dom_linux_build");

    r = construct_cpuid_policy(&f, false);
    if ( r )
        failwith_oss_xc("construct_cpuid_policy");

    r = xc_dom_gnttab_seed(xch, domid,
                           *console_mfn, *store_mfn,
                           console_domid, store_domid);
    if ( r )
        failwith_oss_xc("xc_dom_gnttab_seed");

    /* snprintf always NUL-terminates; guard against a NULL protocol. */
    native_protocol = xc_domain_get_native_protocol(xch, domid);
    snprintf(protocol, 64, "%s", native_protocol ? native_protocol : "");

    free_flags(&f);
    xc_dom_release(dom);

    return 0;
}
/* Apply the domain's TSC mode from the flags; fatal on failure. */
static void configure_tsc(struct flags f)
{
    if (xc_domain_set_tsc_info(xch, domid, f.tsc_mode, 0, 0, 0))
        failwith_oss_xc("xc_domain_set_tsc_info");
}
/* Open a libxc interface handle and return it to OCaml.
 * NOTE(review): the xc_interface pointer is handed back as a raw,
 * unboxed OCaml `value` — the OCaml side must treat it as an opaque
 * handle that the GC never scans; confirm against the .ml binding. */
CAMLprim value stub_xenguest_init()
{
	xc_interface *xch;

	xch = xc_interface_open(NULL, NULL, 0);
	if (xch == NULL)
		failwith_oss_xc(NULL, "xc_interface_open");
	return (value)xch;
}
Пример #5
0
/* OCaml stub: dump the core of domain [domid] to the path in [file]. */
CAMLprim value stub_xc_domain_dumpcore(value handle, value domid, value file)
{
    CAMLparam3(handle, domid, file);

    if (xc_domain_dumpcore(_H(handle), _D(domid), String_val(file)) != 0)
        failwith_oss_xc(_H(handle), "xc_domain_dumpcore");

    CAMLreturn(Val_unit);
}
/*
 * Slow-path resume for an uncooperative domain; the fast version is
 * only available in the closed-source xc.
 */
int stub_xc_domain_resume_slow(void)
{
    /* fast is hard-coded to 0: only the slow path is exposed here */
    if (xc_domain_resume(xch, domid, 0) != 0)
        failwith_oss_xc("xc_domain_resume");

    return 0;
}
/*
 * Apply per-vCPU affinity and credit-scheduler weight/cap from [f].
 *
 * f.vcpu_affinity[i] is a string of '0'/'1' characters, one per pcpu;
 * NULL means "leave vcpu i unpinned".  Affinity failures are fatal;
 * a missing credit scheduler only logs and returns.
 */
static void configure_vcpus(struct flags f){
    struct xen_domctl_sched_credit sdom;
    int i, j, r, size, pcpus_supplied, min;
    xc_cpumap_t cpumap;

    size = xc_get_cpumap_size(xch) * 8; /* array is of uint8_t */

    for (i=0; i<f.vcpus; i++){
        if (f.vcpu_affinity[i]){ /* NULL means unset */
            /* Only consider as many bits as both the supplied mask and
               the host cpumap can represent. */
            pcpus_supplied = strlen(f.vcpu_affinity[i]);
            min = (pcpus_supplied < size)?pcpus_supplied:size;
            cpumap = xc_cpumap_alloc(xch);
            if (cpumap == NULL)
                failwith_oss_xc("xc_cpumap_alloc");

            for (j=0; j<min; j++) {
                if (f.vcpu_affinity[i][j] == '1')
                    cpumap[j/8] |= 1 << (j&7); /* set bit j of the byte map */
            }
            r = xc_vcpu_setaffinity(xch, domid, i, cpumap, NULL,
                                    XEN_VCPUAFFINITY_HARD);
            free(cpumap);
            if (r) {
                failwith_oss_xc("xc_vcpu_setaffinity");
            }
        }
    }

    r = xc_sched_credit_domain_get(xch, domid, &sdom);
    /* This should only happen when a different scheduler is set */
    if (r) {
        xg_info("Failed to get credit scheduler parameters: scheduler not enabled?\n");
        return;
    }
    /* 0 means "not specified": keep the scheduler's current value. */
    if (f.vcpu_weight != 0L) sdom.weight = f.vcpu_weight;
    if (f.vcpu_cap != 0L) sdom.cap = f.vcpu_cap;
    /* This shouldn't fail, if "get" above succeeds. This error is fatal
       to highlight the need to investigate further. */
    r = xc_sched_credit_domain_set(xch, domid, &sdom);
    if (r)
        failwith_oss_xc("xc_sched_credit_domain_set");
}
Пример #8
0
/*
 * OCaml stub: slow-path resume for an uncooperative domain (the fast
 * version only exists in the closed-source xc).
 */
CAMLprim value stub_xc_domain_resume_slow(value handle, value domid)
{
    CAMLparam2(handle, domid);

    /* fast is hard-coded to 0: only the slow path is exposed here */
    if (xc_domain_resume(_H(handle), _D(domid), 0) != 0)
        failwith_oss_xc(_H(handle), "xc_domain_resume");

    CAMLreturn(Val_unit);
}
Пример #9
0
/* Open syslog for this xenguest process, then open a libxc interface
 * handle and return it to OCaml.
 * NOTE(review): the xc_interface pointer is handed back as a raw,
 * unboxed OCaml `value` — the OCaml side must treat it as an opaque
 * handle that the GC never scans; confirm against the .ml binding. */
CAMLprim value stub_xenguest_init()
{
    xc_interface *xch;

    openlog("xenguest", LOG_NDELAY, LOG_DAEMON);

    xch = xc_interface_open(NULL, NULL, 0);
    if (xch == NULL)
        failwith_oss_xc(NULL, "xc_interface_open");
    return (value)xch;
}
/*
 * Save the domain's state to [fd], using the local suspend and
 * qemu-logdirty callbacks.  Returns 0; raises via failwith_oss_xc on
 * libxc failure.
 */
int stub_xc_domain_save(int fd, int max_iters, int max_factors,
                        int flags, int hvm)
{
    struct save_callbacks callbacks;
    int rc;

    memset(&callbacks, 0, sizeof(callbacks));
    callbacks.suspend = suspend_callback;
    callbacks.switch_qemu_logdirty = switch_qemu_logdirty;
    callbacks.data = NULL;

    rc = xc_domain_save(xch, fd, domid, max_iters, max_factors,
                        flags, &callbacks, hvm);
    if (rc)
        failwith_oss_xc("xc_domain_save");

    return 0;
}
Пример #11
0
/*
 * OCaml stub: save domain [domid]'s state to [fd].
 *
 * All OCaml values are copied to C before the runtime lock is dropped.
 * Raises via failwith_oss_xc on libxc failure.
 *
 * Fix: the domid was smuggled into callbacks.data with a bare
 * `(void*) c_domid` cast (int -> pointer of different size on LP64);
 * go through uintptr_t, which is the defined round-trip type.
 */
CAMLprim value stub_xc_domain_save(value handle, value fd, value domid,
                                   value max_iters, value max_factors,
                                   value flags, value hvm)
{
    CAMLparam5(handle, fd, domid, max_iters, max_factors);
    CAMLxparam2(flags, hvm);
    struct save_callbacks callbacks;

    uint32_t c_flags;
    uint32_t c_domid;
    int r;
    unsigned long generation_id_addr;

    c_flags = caml_convert_flag_list(flags, suspend_flag_list);
    c_domid = _D(domid);

    memset(&callbacks, 0, sizeof(callbacks));
    /* The callbacks receive the domid via their opaque data pointer. */
    callbacks.data = (void *)(uintptr_t)c_domid;
    callbacks.suspend = dispatch_suspend;
    callbacks.switch_qemu_logdirty = switch_qemu_logdirty;

    /* Drop the OCaml runtime lock across the (long-running) save. */
    caml_enter_blocking_section();
    generation_id_addr = xenstore_get(c_domid, GENERATION_ID_ADDRESS);
    r = xc_domain_save(_H(handle), Int_val(fd), c_domid,
                       Int_val(max_iters), Int_val(max_factors),
                       c_flags, &callbacks, Bool_val(hvm)
#ifdef XENGUEST_4_2
                       ,generation_id_addr
#endif
        );
    caml_leave_blocking_section();
    if (r)
        failwith_oss_xc(_H(handle), "xc_domain_save");

    CAMLreturn(Val_unit);
}
Пример #12
0
/*
 * OCaml stub: restore a domain from the stream on [fd].
 *
 * Returns a (store_mfn, console_mfn) nativeint pair; raises via
 * failwith_oss_xc on failure.
 *
 * Fixes:
 *  - store_domid, console_domid and no_incr_generationid were never
 *    registered as GC roots (only [hvm] was in CAMLxparam1); register
 *    all four extra parameters.
 *  - c_vm_generationid_addr was read uninitialised in non-XENGUEST_4_2
 *    builds (it is only written by xc_domain_restore under that ifdef);
 *    zero-initialise it.
 *  - snprintf's int return was stored in a size_t (a negative error
 *    return became huge), and the syslog format used %d for size_t
 *    arguments (undefined behaviour); use an int and %zu.
 */
CAMLprim value stub_xc_domain_restore(value handle, value fd, value domid,
                                      value store_evtchn, value store_domid,
                                      value console_evtchn, value console_domid,
                                      value hvm, value no_incr_generationid)
{
    CAMLparam5(handle, fd, domid, store_evtchn, console_evtchn);
    CAMLxparam4(store_domid, console_domid, hvm, no_incr_generationid);
    CAMLlocal1(result);
    unsigned long store_mfn, console_mfn;
    domid_t c_store_domid, c_console_domid;
    unsigned long c_vm_generationid_addr = 0;
    char c_vm_generationid_addr_s[32];
    unsigned int c_store_evtchn, c_console_evtchn;
    int r;
    int written;
    size_t size;

    struct flags f;
    get_flags(&f,_D(domid));

    /* Copy the OCaml ints to C before dropping the runtime lock. */
    c_store_evtchn = Int_val(store_evtchn);
    c_store_domid = Int_val(store_domid);
    c_console_evtchn = Int_val(console_evtchn);
    c_console_domid = Int_val(console_domid);

#ifdef HVM_PARAM_VIRIDIAN
    xc_set_hvm_param(_H(handle), _D(domid), HVM_PARAM_VIRIDIAN, f.viridian);
#endif
    configure_vcpus(_H(handle), _D(domid), f);

    caml_enter_blocking_section();

    r = xc_domain_restore(_H(handle), Int_val(fd), _D(domid),
                          c_store_evtchn, &store_mfn,
#ifdef XENGUEST_4_2
                          c_store_domid,
#endif
                          c_console_evtchn, &console_mfn,
#ifdef XENGUEST_4_2
                          c_console_domid,
#endif
                          Bool_val(hvm), f.pae, 0 /*superpages*/
#ifdef XENGUEST_4_2
                          ,
                          Bool_val(no_incr_generationid),
                          &c_vm_generationid_addr,
                          NULL /* restore_callbacks */
#endif
        );
    if (!r) {
        /* Publish the generation-id address to xenstore as a hex string. */
        size = sizeof(c_vm_generationid_addr_s) - 1; /* guarantee a NULL remains on the end */
        written = snprintf(c_vm_generationid_addr_s, size, "0x%lx", c_vm_generationid_addr);
        if (written >= 0 && (size_t)written < size)
            r = xenstore_puts(_D(domid), c_vm_generationid_addr_s, GENERATION_ID_ADDRESS);
        else {
            syslog(LOG_ERR|LOG_DAEMON,"Failed to write %s (%d >= %zu)", GENERATION_ID_ADDRESS, written, size);
            r = 1;
        }
    }
    caml_leave_blocking_section();
    if (r)
        failwith_oss_xc(_H(handle), "xc_domain_restore");

    result = caml_alloc_tuple(2);
    Store_field(result, 0, caml_copy_nativeint(store_mfn));
    Store_field(result, 1, caml_copy_nativeint(console_mfn));
    CAMLreturn(result);
}
Пример #13
0
/*
 * OCaml stub: build a PV (Linux) domain.
 *
 * Returns a (store_mfn, console_mfn, protocol) triple; raises via
 * failwith_oss_xc on failure.
 *
 * Fixes:
 *  - the early failure paths (xc_dom_allocate, the decompress-limit
 *    setters) raised straight through failwith_oss_xc, leaking the
 *    strdup'd image/ramdisk names and (for the limit setters) the
 *    xc_dom_image; use a single goto-cleanup exit that frees first.
 *  - the protocol string was copied with strncpy (no NUL-termination
 *    guarantee, no NULL guard); use snprintf with a guard.
 */
CAMLprim value stub_xc_linux_build_native(value xc_handle, value domid,
                                          value mem_max_mib, value mem_start_mib,
                                          value image_name, value ramdisk_name,
                                          value cmdline, value features,
                                          value flags, value store_evtchn,
                                          value console_evtchn)
{
    CAMLparam5(xc_handle, domid, mem_max_mib, mem_start_mib, image_name);
    CAMLxparam5(ramdisk_name, cmdline, features, flags, store_evtchn);
    CAMLxparam1(console_evtchn);
    CAMLlocal1(result);

    unsigned long store_mfn = 0;
    unsigned long console_mfn = 0;
    int r;
    struct xc_dom_image *dom = NULL;
    char c_protocol[64];
    const char *err = NULL; /* failure tag; raised after cleanup */

    /* Copy the ocaml values into c-land before dropping the mutex */
    xc_interface *xch = _H(xc_handle);
    unsigned int c_mem_start_mib = Int_val(mem_start_mib);
    uint32_t c_domid = _D(domid);
    char *c_image_name = strdup(String_val(image_name));
    char *c_ramdisk_name = ramdisk_name == None_val ? NULL : strdup(String_val(Field(ramdisk_name, 0)));
    unsigned long c_flags = Int_val(flags);
    unsigned int c_store_evtchn = Int_val(store_evtchn);
    unsigned int c_console_evtchn = Int_val(console_evtchn);

    struct flags f;
    get_flags(&f,c_domid);

    xc_dom_loginit(xch);
    dom = xc_dom_allocate(xch, String_val(cmdline), String_val(features));
    if (!dom) {
        err = "xc_dom_allocate";
        goto out;
    }

    configure_vcpus(xch, c_domid, f);
    configure_tsc(xch, c_domid, f);
#ifdef XC_HAVE_DECOMPRESS_LIMITS
    if ( xc_dom_kernel_max_size(dom, f.kernel_max_size) ) {
        err = "xc_dom_kernel_max_size";
        goto out;
    }
    if ( xc_dom_ramdisk_max_size(dom, f.ramdisk_max_size) ) {
        err = "xc_dom_ramdisk_max_size";
        goto out;
    }
#else
    if ( f.kernel_max_size || f.ramdisk_max_size ) {
        syslog(LOG_WARNING|LOG_DAEMON,"Kernel/Ramdisk limits set, but no support compiled in");
    }
#endif

    /* Drop the OCaml runtime lock around the (slow) domain build. */
    caml_enter_blocking_section();
    r = xc_dom_linux_build(xch, dom, c_domid, c_mem_start_mib,
                           c_image_name, c_ramdisk_name, c_flags,
                           c_store_evtchn, &store_mfn,
                           c_console_evtchn, &console_mfn);
    caml_leave_blocking_section();

#ifndef XEN_UNSTABLE
    {
        /* snprintf always NUL-terminates; guard against NULL. */
        const char *proto = xc_dom_get_native_protocol(dom);
        snprintf(c_protocol, sizeof(c_protocol), "%s", proto ? proto : "");
    }
#else
    memset(c_protocol, '\0', sizeof(c_protocol));
#endif

    if (r != 0)
        err = "xc_dom_linux_build";

out:
    free(c_image_name);
    free(c_ramdisk_name);
    if (dom)
        xc_dom_release(dom);
    if (err)
        failwith_oss_xc(xch, err);

    result = caml_alloc_tuple(3);
    Store_field(result, 0, caml_copy_nativeint(store_mfn));
    Store_field(result, 1, caml_copy_nativeint(console_mfn));
    Store_field(result, 2, caml_copy_string(c_protocol));

    CAMLreturn(result);
}
/*
 * Restore a domain from the stream on [fd] (uses the file-scope
 * xch/domid globals).  Returns 0; raises via failwith_oss_xc on
 * failure; exits the process if the generation id cannot be set.
 */
int stub_xc_domain_restore(int fd, int store_evtchn, int console_evtchn,
                           int hvm,
                           unsigned long *store_mfn, unsigned long *console_mfn)
{
    int r = 0;
    struct flags f;

    get_flags(&f);

    if ( hvm )
    {
        /*
         * We have to do this even in the domain restore case as XenServers
         * prior to 6.0.2 did not create a viridian save record.
         */
        if (f.viridian)
            hvm_set_viridian_features(&f);

        xc_set_hvm_param(xch, domid, HVM_PARAM_HPET_ENABLED, f.hpet);
#ifdef HAVE_CORES_PER_SOCKET
        if ( f.cores_per_socket > 0 )
            r = xc_domain_set_cores_per_socket(xch, domid, f.cores_per_socket);
#endif
        /* r is still 0 when cores-per-socket support is compiled out. */
        if ( r )
            failwith_oss_xc("xc_domain_set_cores_per_socket");
    }

    configure_vcpus(f);

    /* store/console domids are hard-coded to 0 (dom0) here. */
    r = xc_domain_restore(xch, fd, domid,
                          store_evtchn, store_mfn, 0,
                          console_evtchn, console_mfn, 0,
                          hvm, f.pae, 0, 0, NULL);
    if ( r )
        failwith_oss_xc("xc_domain_restore");
    /*
     * The legacy -> migration v2 code in XenServer 6.5 didn't combine the
     * out-of-band HVM_PARAM_PAE_ENABLED into the converted stream, and
     * xenguest didn't set it, as the v2 restore code was expected to.
     *
     * This causes xc_cpuid_apply_policy() to hide the PAE bit from the domain
     * cpuid policy, which went unnoticed (and without incident, despite being
     * a guest-visible change) until Xen-4.5 became stricter with its checks
     * for when a guest writes to %cr4.
     *
     * The correct value is still available out-of-band, so clobber the result
     * from the stream, in case the stream is from XenServer 6.5 and is a VM
     * which hasn't rebooted and has a bad HVM PARAM in the v2 stream.
     */
    if ( hvm )
        xc_set_hvm_param(xch, domid, HVM_PARAM_PAE_ENABLED, f.pae);

    r = construct_cpuid_policy(&f, hvm);
    if ( r )
        failwith_oss_xc("construct_cpuid_policy");

    free_flags(&f);

    if ( hvm )
    {
        /* A failed generation id is fatal to the whole process. */
        r = set_genid();
        if (r)
            exit(1);
    }

    return 0;
}
/*
 * Build an HVM domain with an explicit memory layout.
 *
 * Computes the low/high memory split around the below-4G MMIO hole,
 * reserves any RMRR regions needed by passed-through PCI devices, and
 * installs the resulting e820 map.  Returns the xc_hvm_build /
 * xc_domain_set_memory_map result (0 on success), or -ENOMEM; exits the
 * process if more than MAX_RMRR_DEVICES are requested.
 */
int stub_xc_hvm_build_with_mem(uint64_t max_mem_mib, uint64_t max_start_mib,
                               const char *image)
{
    uint64_t lowmem_end, highmem_start, highmem_end, mmio_start;
    struct xc_hvm_build_args args = {
        .mem_size   = max_mem_mib   << 20,
        .mem_target = max_start_mib << 20,
        .mmio_size  = HVM_BELOW_4G_MMIO_LENGTH,
        .image_file_name = image,
    };
    unsigned int i, j, nr = 0;
    struct e820entry *e820;
    int rc;
    unsigned int nr_rdm_entries[MAX_RMRR_DEVICES] = {0};
    unsigned int nr_rmrr_devs = 0;
    struct xen_reserved_device_memory *xrdm[MAX_RMRR_DEVICES] = {0};
    unsigned long rmrr_overlapped_ram = 0;
    char *s;

    /* Collect the RMRRs of every device on the pass-through list
     * (comma-separated SBDF strings; strtok mutates the list in place). */
    if ( pci_passthrough_sbdf_list )
    {
        s = strtok(pci_passthrough_sbdf_list,",");
        while ( s != NULL )
        {
            unsigned int seg, bus, device, func;
            xg_info("Getting RMRRs for device '%s'\n",s);
            if ( parse_pci_sbdf(s, &seg, &bus, &device, &func) )
            {
                if ( !get_rdm(seg, bus, (device << 3) + func,
                        &nr_rdm_entries[nr_rmrr_devs], &xrdm[nr_rmrr_devs]) )
                    nr_rmrr_devs++;
            }
            if ( nr_rmrr_devs == MAX_RMRR_DEVICES )
            {
                xg_err("Error: hit limit of %d RMRR devices for domain\n",
                            MAX_RMRR_DEVICES);
                exit(1);
            }
            s = strtok (NULL, ",");
        }
    }
    e820 = malloc(sizeof(*e820) * E820MAX);
    if (!e820)
	    return -ENOMEM;

    /* Split guest RAM around the MMIO hole just below 4G: anything that
     * would overlap the hole is pushed above 4G instead. */
    lowmem_end  = args.mem_size;
    highmem_end = highmem_start = 1ull << 32;
    mmio_start  = highmem_start - args.mmio_size;

    if ( lowmem_end > mmio_start )
    {
        highmem_end = (1ull << 32) + (lowmem_end - mmio_start);
        lowmem_end = mmio_start;
    }

    args.lowmem_end = lowmem_end;
    args.highmem_end = highmem_end;
    args.mmio_start = mmio_start;

    /* Leave low 1MB to HVMLoader... */
    e820[nr].addr = 0x100000u;
    e820[nr].size = args.lowmem_end - 0x100000u;
    e820[nr].type = E820_RAM;
    nr++;

    /* RDM mapping */
    for (i = 0; i < nr_rmrr_devs; i++)
    {
        for (j = 0; j < nr_rdm_entries[i]; j++)
        {
            e820[nr].addr = xrdm[i][j].start_pfn << XC_PAGE_SHIFT;
            e820[nr].size = xrdm[i][j].nr_pages << XC_PAGE_SHIFT;
            e820[nr].type = E820_RESERVED;
            xg_info("Adding RMRR 0x%lx size 0x%lx\n", e820[nr].addr, e820[nr].size);
            /* An RMRR inside low RAM truncates the low RAM region; the
             * displaced RAM is added back above 4G below. */
            if ( e820[nr].addr < args.lowmem_end ) {
                rmrr_overlapped_ram += ( args.lowmem_end - e820[nr].addr );
                args.lowmem_end = e820[nr].addr;
            }
            nr++;
        }
    }
    /* e820[0] is the low-RAM entry created above; shrink it by the
     * displaced amount and grow highmem / the MMIO hole to match. */
    e820[0].size -= rmrr_overlapped_ram;
    args.highmem_end += rmrr_overlapped_ram;
    args.mmio_size += rmrr_overlapped_ram;
    args.mmio_start -= rmrr_overlapped_ram;

    for (i = 0; i < nr_rmrr_devs; i++)
    {
        free(xrdm[i]);
    }

    if ( args.highmem_end > highmem_start )
    {
        e820[nr].addr = highmem_start;
        e820[nr].size = args.highmem_end - e820[nr].addr;
        e820[nr].type = E820_RAM;
        nr++;
    }

    rc = xc_hvm_build(xch, domid, &args);

    if (!rc)
        rc = xc_domain_set_memory_map(xch, domid, e820, nr);

    free(e820);

    return rc;
}

/*
 * Build an HVM domain: configure vcpus/tsc from the xenstore flags,
 * build the guest memory image, set the HVM params, apply the cpuid
 * policy and seed the grant table.  Returns 0; raises via
 * failwith_oss_xc on any failure.
 */
int stub_xc_hvm_build(int mem_max_mib, int mem_start_mib, const char *image_name,
                      int store_evtchn, int store_domid,
                      int console_evtchn, int console_domid,
                      unsigned long *store_mfn, unsigned long *console_mfn)
{
    struct flags f;
    int rc;

    get_flags(&f);

    configure_vcpus(f);
    configure_tsc(f);

    rc = stub_xc_hvm_build_with_mem(mem_max_mib, mem_start_mib, image_name);
    if (rc)
        failwith_oss_xc("hvm_build");

    rc = hvm_build_set_params(store_evtchn, store_mfn,
                              console_evtchn, console_mfn, f);
    if (rc)
        failwith_oss_xc("hvm_build_params");

    rc = construct_cpuid_policy(&f, true);
    if (rc)
        failwith_oss_xc("construct_cpuid_policy");

    rc = xc_dom_gnttab_hvm_seed(xch, domid, *console_mfn, *store_mfn,
                                console_domid, store_domid);
    if (rc)
        failwith_oss_xc("xc_dom_gnttab_hvm_seed");

    free_flags(&f);

    return 0;
}