/**
 * virQEMUDriverGetCapabilities:
 *
 * Get a reference to the virCapsPtr instance for the
 * driver. If @refresh is true, the capabilities will be
 * rebuilt first.
 *
 * The caller must release the reference with virObjectUnref.
 *
 * Returns: a reference to a virCapsPtr instance or NULL
 */
virCapsPtr virQEMUDriverGetCapabilities(virQEMUDriverPtr driver,
                                        bool refresh)
{
    virCapsPtr ret = NULL;

    if (refresh) {
        virCapsPtr caps = NULL;
        if ((caps = virQEMUDriverCreateCapabilities(driver)) == NULL)
            return NULL;

        qemuDriverLock(driver);
        virObjectUnref(driver->caps);
        driver->caps = caps;
    } else {
        qemuDriverLock(driver);
    }

    if (driver->caps->nguests == 0 && !refresh) {
        VIR_DEBUG("Capabilities didn't detect any guests. Forcing a "
                  "refresh.");
        qemuDriverUnlock(driver);
        return virQEMUDriverGetCapabilities(driver, true);
    }

    ret = virObjectRef(driver->caps);
    qemuDriverUnlock(driver);
    return ret;
}
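/* A minimal usage sketch for the accessor above. The surrounding helper is
 * hypothetical (qemuExampleCountGuestTypes is not a real libvirt function);
 * it only illustrates the documented contract: take a referenced virCapsPtr,
 * check for NULL, and release the reference with virObjectUnref when done.
 */
static int
qemuExampleCountGuestTypes(virQEMUDriverPtr driver)
{
    virCapsPtr caps;
    int nguests;

    /* Take a referenced snapshot of the current capabilities; no driver
     * lock is held while we look at it. */
    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        return -1;

    nguests = (int)caps->nguests;

    /* Drop the reference taken by virQEMUDriverGetCapabilities. */
    virObjectUnref(caps);
    return nguests;
}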
virQEMUDriverConfigPtr virQEMUDriverGetConfig(virQEMUDriverPtr driver)
{
    virQEMUDriverConfigPtr conf;

    qemuDriverLock(driver);
    conf = virObjectRef(driver->config);
    qemuDriverUnlock(driver);
    return conf;
}
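/* The migration code below calls a timeval_to_ms() helper that is not part
 * of this excerpt. The definition sketched here is only a plausible
 * assumption about how that helper converts a struct timeval into a
 * millisecond count, not a copy of the real macro.
 */
#include <sys/time.h>   /* struct timeval, gettimeofday */

static unsigned long long
timeval_to_ms(struct timeval tv)
{
    /* Seconds scaled to milliseconds, plus microseconds truncated to
     * milliseconds. */
    return (tv.tv_sec * 1000ull) + (tv.tv_usec / 1000);
}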
int qemuMigrationWaitForCompletion(struct qemud_driver *driver,
                                   virDomainObjPtr vm)
{
    int ret = -1;
    int status;
    unsigned long long memProcessed;
    unsigned long long memRemaining;
    unsigned long long memTotal;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
        struct timeval now;
        int rc;
        const char *job;

        switch (priv->jobActive) {
        case QEMU_JOB_MIGRATION_OUT:
            job = _("migration job");
            break;
        case QEMU_JOB_SAVE:
            job = _("domain save job");
            break;
        case QEMU_JOB_DUMP:
            job = _("domain core dump job");
            break;
        default:
            job = _("job");
        }

        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
            VIR_DEBUG0("Cancelling job at client request");
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorMigrateCancel(priv->mon);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0) {
                VIR_WARN0("Unable to cancel job");
            }
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
            VIR_DEBUG0("Pausing domain for non-live migration");
            if (qemuMigrationSetOffline(driver, vm) < 0)
                VIR_WARN0("Unable to pause domain");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
            unsigned long long ms = priv->jobSignalsData.migrateDowntime;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
            priv->jobSignalsData.migrateDowntime = 0;
            VIR_DEBUG("Setting migration downtime to %llums", ms);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationDowntime(priv->mon, ms);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration downtime");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
            unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
            priv->jobSignalsData.migrateBandwidth = 0;
            VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration speed");
        }

        /* Repeat check because the job signals might have caused
         * guest to die
         */
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        rc = qemuMonitorGetMigrationStatus(priv->mon,
                                           &status,
                                           &memProcessed,
                                           &memRemaining,
                                           &memTotal);
        qemuDomainObjExitMonitorWithDriver(driver, vm);

        if (rc < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            goto cleanup;
        }

        if (gettimeofday(&now, NULL) < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            virReportSystemError(errno, "%s",
                                 _("cannot get time of day"));
            goto cleanup;
        }
        priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;

        switch (status) {
        case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
            priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
            qemuReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                            job, _("is not active"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
            priv->jobInfo.dataTotal = memTotal;
            priv->jobInfo.dataRemaining = memRemaining;
            priv->jobInfo.dataProcessed = memProcessed;

            priv->jobInfo.memTotal =
                memTotal;
            priv->jobInfo.memRemaining = memRemaining;
            priv->jobInfo.memProcessed = memProcessed;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
            ret = 0;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            qemuReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                            job, _("unexpectedly failed"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
            qemuReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                            job, _("canceled by client"));
            break;
        }

        virDomainObjUnlock(vm);
        qemuDriverUnlock(driver);

        nanosleep(&ts, NULL);

        qemuDriverLock(driver);
        virDomainObjLock(vm);
    }

cleanup:
    return ret;
}


/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                               virConnectPtr dconn,
                               virStreamPtr st,
                               const char *dname,
                               const char *dom_xml)
{
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    int dataFD[2] = { -1, -1 };
    virBitmapPtr qemuCaps = NULL;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (pipe(dataFD) < 0 || virSetCloseExec(dataFD[0]) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* check that this qemu version supports the interactive exec */
    if (qemuCapsExtractVersionInfo(vm->def->emulator, vm->def->os.arch,
                                   NULL, &qemuCaps) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Cannot determine QEMU argv syntax %s"),
                        vm->def->emulator);
        goto endjob;
    }

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming stdin (which qemu_command might convert to exec:cat or fd:n)
     */
    internalret = qemuProcessStart(dconn, driver, vm, "stdin", true,
                                   dataFD[1], NULL,
                                   VIR_VM_OP_MIGRATE_IN_START);
    if (internalret < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (virFDStreamOpen(st, dataFD[0]) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, 0);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        virReportSystemError(errno, "%s",
                             _("cannot pass pipe for tunnelled migration"));
        goto endjob;
    }

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    qemuCapsFree(qemuCaps);
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}
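/* The polling loop in qemuMigrationWaitForCompletion() above is driven by the
 * priv->jobSignals bitmask: another thread raises a flag and the loop acts on
 * it at the next 50ms poll. The function below is a hypothetical sketch of
 * that producer side (it is not the real libvirt entry point); it only shows
 * a cancellation request being raised under the domain object lock.
 */
static void
qemuExampleRequestMigrationCancel(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    virDomainObjLock(vm);
    if (priv->jobActive == QEMU_JOB_MIGRATION_OUT) {
        /* Picked up by the wait loop, which then issues
         * qemuMonitorMigrateCancel() through the monitor. */
        priv->jobSignals |= QEMU_JOB_SIGNAL_CANCEL;
    }
    virDomainObjUnlock(vm);
}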