/*
 * Turn on log-dirty tracking for the secondary (restored) VM so that
 * subsequent checkpoints can identify which pages must be re-sent.
 *
 * Completion is reported asynchronously through lds->callback with 0 on
 * success or ERROR_FAIL if the shadow-op hypercall is rejected.  For HVM
 * guests the qemu side of logdirty must be switched as well; that helper
 * invokes lds->callback itself when done.
 */
static void colo_enable_logdirty(libxl__colo_restore_state *crs, libxl__egc *egc)
{
    libxl__domain_create_state *dcs = CONTAINER_OF(crs, *dcs, crs);
    libxl__colo_restore_checkpoint_state *crcs = crs->crcs;

    /* Convenience aliases */
    const uint32_t domid = crs->domid;
    libxl__logdirty_switch *const lds = &crcs->lds;

    EGC_GC;

    /* we need to know which pages are dirty to restore the guest */
    if (xc_shadow_control(CTX->xch, domid,
                          XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                          NULL, 0, NULL, 0, NULL) < 0) {
        LOGD(ERROR, domid, "cannot enable secondary vm's logdirty");
        lds->callback(egc, lds, ERROR_FAIL);
        return;
    }

    if (!crs->hvm) {
        /* PV guest: hypervisor-side tracking is all that's needed. */
        lds->callback(egc, lds, 0);
        return;
    }

    /* HVM: qemu also keeps a dirty log; switch it on before reporting. */
    libxl__domain_common_switch_qemu_logdirty(egc, domid, 1, lds);
}
static int alloc_magic_pages(struct xc_dom_image *dom) { struct ft_cxt devtree; void *guest_devtree; unsigned long shadow_mb; int rma_pages; int rc; /* Allocate special pages from the end of the RMA. */ rma_pages = 1 << (dom->realmodearea_log - PAGE_SHIFT); dom->shared_info_pfn = --rma_pages; dom->console_pfn = --rma_pages; dom->xenstore_pfn = --rma_pages; /* Gather shadow allocation info for the device tree. */ rc = xc_shadow_control(dom->guest_xc, dom->guest_domid, XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION, NULL, 0, &shadow_mb, 0, NULL); if (rc < 0 || shadow_mb == 0) { xc_dom_printf("Couldn't get shadow allocation size or it was 0.\n"); return rc; } /* Build device tree. */ rc = make_devtree(&devtree, dom, shadow_mb); if (rc < 0) { xc_dom_printf("Failed to create flattened device tree.\n"); return rc; } /* Find a spot for it. */ rc = xc_dom_alloc_segment(dom, &dom->devicetree_seg, "devtree", 0, devtree.bph->totalsize); if (rc) goto out; /* Copy the device tree into place. */ guest_devtree = xc_dom_seg_to_ptr(dom, &dom->devicetree_seg); if (!guest_devtree) { xc_dom_printf("Couldn't map guest memory for device tree.\n"); rc = -1; goto out; } memcpy(guest_devtree, devtree.bph, devtree.bph->totalsize); out: free_devtree(&devtree); return rc; }
/*
 * Arch-specific post-creation configuration of a new domain: memory-map
 * limit (PV), TSC mode, migration disabling, RTC time offset, shadow
 * memory allocation (HVM/PVH), and host E820 propagation (PV).
 *
 * Returns 0 on success or ERROR_FAIL.  Note that several of the xc_*
 * calls below have their return values deliberately ignored; only
 * localtime_r and libxl__e820_alloc failures are reported.
 */
int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
                              uint32_t domid)
{
    int ret = 0;
    int tsc_mode;
    uint32_t rtc_timeoffset;
    libxl_ctx *ctx = libxl__gc_owner(gc);

    /* PV guests get a memmap limit of max memory plus PV slack. */
    if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_PV)
        xc_domain_set_memmap_limit(ctx->xch, domid,
                                   (d_config->b_info.max_memkb +
                                    d_config->b_info.u.pv.slack_memkb));

    /* Map the libxl TSC-mode enum onto the hypervisor's numeric modes. */
    switch (d_config->b_info.tsc_mode) {
    case LIBXL_TSC_MODE_DEFAULT:
        tsc_mode = 0;
        break;
    case LIBXL_TSC_MODE_ALWAYS_EMULATE:
        tsc_mode = 1;
        break;
    case LIBXL_TSC_MODE_NATIVE:
        tsc_mode = 2;
        break;
    case LIBXL_TSC_MODE_NATIVE_PARAVIRT:
        tsc_mode = 3;
        break;
    default:
        /* Enum value validated earlier; anything else is a libxl bug. */
        abort();
    }
    xc_domain_set_tsc_info(ctx->xch, domid, tsc_mode, 0, 0, 0);

    if (libxl_defbool_val(d_config->b_info.disable_migrate))
        xc_domain_disable_migrate(ctx->xch, domid);

    rtc_timeoffset = d_config->b_info.rtc_timeoffset;
    if (libxl_defbool_val(d_config->b_info.localtime)) {
        time_t t;
        struct tm *tm, result;

        t = time(NULL);
        tm = localtime_r(&t, &result);
        if (!tm) {
            LOGE(ERROR, "Failed to call localtime_r");
            ret = ERROR_FAIL;
            goto out;
        }
        /*
         * Fold the host's current UTC offset into the guest's RTC offset.
         * tm_gmtoff is a BSD/glibc extension to struct tm.
         */
        rtc_timeoffset += tm->tm_gmtoff;
    }

    if (rtc_timeoffset)
        xc_domain_set_time_offset(ctx->xch, domid, rtc_timeoffset);

    if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_HVM ||
        libxl_defbool_val(d_config->c_info.pvh)) {
        unsigned long shadow;
        /* Convert shadow_memkb to MB, rounding up. */
        shadow = (d_config->b_info.shadow_memkb + 1023) / 1024;
        xc_shadow_control(ctx->xch, domid, XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION,
                          NULL, 0, &shadow, 0, NULL);
    }

    /*
     * NOTE(review): this PV check reads c_info.type while the memmap-limit
     * check above reads b_info.type — confirm these are intentionally the
     * same value (they normally mirror each other after config parsing).
     */
    if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV &&
        libxl_defbool_val(d_config->b_info.u.pv.e820_host)) {
        ret = libxl__e820_alloc(gc, domid, d_config);
        if (ret) {
            LOGE(ERROR, "Failed while collecting E820 with: %d (errno:%d)\n",
                 ret, errno);
        }
    }

out:
    return ret;
}