Code Example #1
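/* bhyve: start a defined domain.  virDomainObjIsActive() rejects the
 * request when the domain is already running. */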
static int
bhyveDomainCreateWithFlags(virDomainPtr dom,
                           unsigned int flags)
{
    bhyveConnPtr privconn = dom->conn->privateData;
    virDomainObjPtr vm;
    unsigned int start_flags = 0;
    int ret = -1;

    virCheckFlags(VIR_DOMAIN_START_AUTODESTROY, -1);

    if (flags & VIR_DOMAIN_START_AUTODESTROY)
        start_flags |= VIR_BHYVE_PROCESS_START_AUTODESTROY;

    if (!(vm = bhyveDomObjFromDomain(dom)))
        goto cleanup;

    if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("Domain is already running"));
        goto cleanup;
    }

    ret = virBhyveProcessStart(dom->conn, privconn, vm,
                               VIR_DOMAIN_RUNNING_BOOTED,
                               start_flags);

 cleanup:
    virObjectUnlock(vm);
    return ret;
}
Code Example #2
File: bhyve_driver.c  Project: hitchiker42/libvirt
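/* bhyve: fill in virDomainInfo.  CPU time is collected only when
 * virDomainObjIsActive() reports the domain as running. */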
static int
bhyveDomainGetInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    virDomainObjPtr vm;
    int ret = -1;

    if (!(vm = bhyveDomObjFromDomain(domain)))
        goto cleanup;

    if (virDomainGetInfoEnsureACL(domain->conn, vm->def) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        if (virBhyveGetDomainTotalCpuStats(vm, &(info->cpuTime)) < 0)
            goto cleanup;
    } else {
        info->cpuTime = 0;
    }

    info->state = virDomainObjGetState(vm, NULL);
    info->maxMem = vm->def->mem.max_balloon;
    info->nrVirtCpu = vm->def->vcpus;
    ret = 0;

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    return ret;
}
Code Example #3
File: lxc_process.c  Project: i-ninth/libvirt
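/* LXC: autostart callback.  The container is started only when it is
 * marked autostart and virDomainObjIsActive() shows it is not already
 * running; a STARTED lifecycle event is queued on success. */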
static int
virLXCProcessAutostartDomain(virDomainObjPtr vm,
                             void *opaque)
{
    const struct virLXCProcessAutostartData *data = opaque;
    int ret = 0;

    virObjectLock(vm);
    if (vm->autostart &&
        !virDomainObjIsActive(vm)) {
        ret = virLXCProcessStart(data->conn, data->driver, vm,
                                 0, NULL, false,
                                 VIR_DOMAIN_RUNNING_BOOTED);
        virDomainAuditStart(vm, "booted", ret >= 0);
        if (ret < 0) {
            virErrorPtr err = virGetLastError();
            VIR_ERROR(_("Failed to autostart VM '%s': %s"),
                      vm->def->name,
                      err ? err->message : "");
        } else {
            virObjectEventPtr event =
                virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_BOOTED);
            if (event)
                virObjectEventStateQueue(data->driver->domainEventState, event);
        }
    }
    virObjectUnlock(vm);
    return ret;
}
Code Example #4
File: vmware_driver.c  Project: rbu/libvirt
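/* VMware: start a defined domain; the request is rejected when
 * virDomainObjIsActive() reports it as already running. */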
static int
vmwareDomainCreateWithFlags(virDomainPtr dom,
                            unsigned int flags ATTRIBUTE_UNUSED)
{
    struct vmware_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    vmwareDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        vmwareError(VIR_ERR_NO_DOMAIN,
                    _("No domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (virDomainObjIsActive(vm)) {
        vmwareError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("Domain is already running"));
        goto cleanup;
    }

    ret = vmwareStartVM(driver, vm);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    vmwareDriverUnlock(driver);
    return ret;
}
Code Example #5
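/* bhyve: forcefully stop a domain.  virDomainObjIsActive() ensures the
 * domain is running before it is destroyed; a transient domain is then
 * removed from the domain list. */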
static int
bhyveDomainDestroy(virDomainPtr dom)
{
    bhyveConnPtr privconn = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    if (!(vm = bhyveDomObjFromDomain(dom)))
        goto cleanup;

    if (virDomainDestroyEnsureACL(dom->conn, vm->def) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("Domain is not running"));
        goto cleanup;
    }

    ret = virBhyveProcessStop(privconn, vm, VIR_DOMAIN_SHUTOFF_DESTROYED);

    if (!vm->persistent) {
        virDomainObjListRemove(privconn->domains, vm);
        vm = NULL;
    }

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    return ret;
}
Code Example #6
File: lxc_process.c  Project: ISI-apex/libvirt-ARM
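/* LXC: stop a container.  Returns early when virDomainObjIsActive()
 * reports the VM as inactive; otherwise the processes are killed via the
 * cgroup, or directly by PID when no cgroup exists. */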
int virLXCProcessStop(virLXCDriverPtr driver,
                      virDomainObjPtr vm,
                      virDomainShutoffReason reason)
{
    int rc;
    virLXCDomainObjPrivatePtr priv;

    VIR_DEBUG("Stopping VM name=%s pid=%d reason=%d",
              vm->def->name, (int)vm->pid, (int)reason);
    if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("VM '%s' not active", vm->def->name);
        return 0;
    }

    priv = vm->privateData;

    if (vm->pid <= 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Invalid PID %d for container"), vm->pid);
        return -1;
    }

    virSecurityManagerRestoreAllLabel(driver->securityManager,
                                      vm->def, false);
    virSecurityManagerReleaseLabel(driver->securityManager, vm->def);
    /* Clear out dynamically assigned labels */
    if (vm->def->nseclabels &&
        vm->def->seclabels[0]->type == VIR_DOMAIN_SECLABEL_DYNAMIC) {
        VIR_FREE(vm->def->seclabels[0]->model);
        VIR_FREE(vm->def->seclabels[0]->label);
        VIR_FREE(vm->def->seclabels[0]->imagelabel);
    }

    if (priv->cgroup) {
        rc = virCgroupKillPainfully(priv->cgroup);
        if (rc < 0)
            return -1;
        if (rc > 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Some processes refused to die"));
            return -1;
        }
    } else {
        /* If cgroup doesn't exist, just try cleaning up the
         * libvirt_lxc process */
        if (virProcessKillPainfully(vm->pid, true) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Processes %d refused to die"), (int)vm->pid);
            return -1;
        }
    }

    virLXCProcessCleanup(driver, vm, reason);

    return 0;
}
Code Example #7
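/* OpenVZ: interface statistics.  virDomainObjIsActive() rejects the call
 * for stopped domains before the interface name is validated. */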
static int
openvzDomainInterfaceStats (virDomainPtr dom,
                            const char *path,
                            struct _virDomainInterfaceStats *stats)
{
    struct openvz_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int i;
    int ret = -1;

    openvzDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    openvzDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        openvzError(VIR_ERR_NO_DOMAIN,
                    _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        openvzError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        goto cleanup;
    }

    /* Check the path is one of the domain's network interfaces. */
    for (i = 0 ; i < vm->def->nnets ; i++) {
        if (vm->def->nets[i]->ifname &&
            STREQ (vm->def->nets[i]->ifname, path)) {
            ret = 0;
            break;
        }
    }

    if (ret == 0)
        ret = linuxDomainInterfaceStats(path, stats);
    else
        openvzError(VIR_ERR_INVALID_ARG,
                    _("invalid path, '%s' is not a known interface"), path);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}
Code Example #8
File: vmware_driver.c  Project: bigclouds/libvirt
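/* VMware: undefine a domain.  An active domain merely loses its
 * persistent flag; an inactive one is removed from the domain list. */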
static int
vmwareDomainUndefineFlags(virDomainPtr dom,
                          unsigned int flags)
{
    struct vmware_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    virCheckFlags(0, -1);

    vmwareDriverLock(driver);
    vm = virDomainObjListFindByUUID(driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];

        virUUIDFormat(dom->uuid, uuidstr);
        virReportError(VIR_ERR_NO_DOMAIN,
                       _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!vm->persistent) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("cannot undefine transient domain"));
        goto cleanup;
    }

    if (vmwareUpdateVMStatus(driver, vm) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        virDomainObjListRemove(driver->domains, vm);
        vm = NULL;
    }

    ret = 0;

  cleanup:
    if (vm)
        virObjectUnlock(vm);
    vmwareDriverUnlock(driver);
    return ret;
}
Code Example #9
File: bhyve_driver.c  Project: hitchiker42/libvirt
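/* bhyve: undefine a domain and queue an UNDEFINED lifecycle event.
 * An active domain becomes transient; an inactive one is removed. */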
static int
bhyveDomainUndefine(virDomainPtr domain)
{
    bhyveConnPtr privconn = domain->conn->privateData;
    virObjectEventPtr event = NULL;
    virDomainObjPtr vm;
    int ret = -1;

    if (!(vm = bhyveDomObjFromDomain(domain)))
        goto cleanup;

    if (virDomainUndefineEnsureACL(domain->conn, vm->def) < 0)
        goto cleanup;

    if (!vm->persistent) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("Cannot undefine transient domain"));
        goto cleanup;
    }

    if (virDomainDeleteConfig(BHYVE_CONFIG_DIR,
                              BHYVE_AUTOSTART_DIR,
                              vm) < 0)
        goto cleanup;

    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_UNDEFINED,
                                              VIR_DOMAIN_EVENT_UNDEFINED_REMOVED);

    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        virDomainObjListRemove(privconn->domains, vm);
        vm = NULL;
    }

    ret = 0;

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    if (event)
        virObjectEventStateQueue(privconn->domainEventState, event);
    return ret;
}
Code Example #10
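/* bhyve: virDomainIsActive implementation; simply returns the result of
 * virDomainObjIsActive() for the looked-up domain object. */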
static int
bhyveDomainIsActive(virDomainPtr domain)
{
    virDomainObjPtr obj;
    int ret = -1;

    if (!(obj = bhyveDomObjFromDomain(domain)))
        goto cleanup;

    if (virDomainIsActiveEnsureACL(domain->conn, obj->def) < 0)
        goto cleanup;

    ret = virDomainObjIsActive(obj);

 cleanup:
    virObjectUnlock(obj);
    return ret;
}
Code Example #11
File: openvz_driver.c  Project: coolsnow77/libvirt
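/* OpenVZ: undefine via 'vzctl destroy'.  An active domain becomes
 * transient; an inactive one is removed from the domain list. */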
static int
openvzDomainUndefineFlags(virDomainPtr dom,
                          unsigned int flags)
{
    struct openvz_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    const char *prog[] = { VZCTL, "--quiet", "destroy", PROGRAM_SENTINAL, NULL };
    int ret = -1;
    int status;

    virCheckFlags(0, -1);

    openvzDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        openvzError(VIR_ERR_NO_DOMAIN, "%s",
                    _("no domain with matching uuid"));
        goto cleanup;
    }

    if (openvzGetVEStatus(vm, &status, NULL) == -1)
        goto cleanup;

    openvzSetProgramSentinal(prog, vm->def->name);
    if (virRun(prog, NULL) < 0) {
        goto cleanup;
    }

    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        virDomainRemoveInactive(&driver->domains, vm);
        vm = NULL;
    }

    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    openvzDriverUnlock(driver);
    return ret;
}
Code Example #12
File: bhyve_driver.c  Project: hitchiker42/libvirt
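/* bhyve: autostart callback; the domain is started only when it is marked
 * autostart and virDomainObjIsActive() shows it is not already running. */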
static int
bhyveAutostartDomain(virDomainObjPtr vm, void *opaque)
{
    const struct bhyveAutostartData *data = opaque;
    int ret = 0;
    virObjectLock(vm);
    if (vm->autostart && !virDomainObjIsActive(vm)) {
        virResetLastError();
        ret = virBhyveProcessStart(data->conn, data->driver, vm,
                                   VIR_DOMAIN_RUNNING_BOOTED, 0);
        if (ret < 0) {
            virErrorPtr err = virGetLastError();
            VIR_ERROR(_("Failed to autostart VM '%s': %s"),
                      vm->def->name, err ? err->message : _("unknown error"));
        }
    }
    virObjectUnlock(vm);
    return ret;
}
Code Example #13
File: openvz_driver.c  Project: coolsnow77/libvirt
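/* OpenVZ: virDomainIsActive implementation returning
 * virDomainObjIsActive() for the looked-up domain object. */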
static int openvzDomainIsActive(virDomainPtr dom)
{
    struct openvz_driver *driver = dom->conn->privateData;
    virDomainObjPtr obj;
    int ret = -1;

    openvzDriverLock(driver);
    obj = virDomainFindByUUID(&driver->domains, dom->uuid);
    openvzDriverUnlock(driver);
    if (!obj) {
        openvzError(VIR_ERR_NO_DOMAIN, NULL);
        goto cleanup;
    }
    ret = virDomainObjIsActive(obj);

cleanup:
    if (obj)
        virDomainObjUnlock(obj);
    return ret;
}
Code Example #14
File: vmware_driver.c  Project: rbu/libvirt
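/* VMware: flag-less undefine.  virDomainObjIsActive() makes the call fail
 * for a running domain; transient domains cannot be undefined either. */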
static int
vmwareDomainUndefine(virDomainPtr dom)
{
    struct vmware_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    vmwareDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];

        virUUIDFormat(dom->uuid, uuidstr);
        vmwareError(VIR_ERR_NO_DOMAIN,
                    _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (virDomainObjIsActive(vm)) {
        vmwareError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("cannot undefine active domain"));
        goto cleanup;
    }

    if (!vm->persistent) {
        vmwareError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("cannot undefine transient domain"));
        goto cleanup;
    }

    virDomainRemoveInactive(&driver->domains, vm);
    vm = NULL;
    ret = 0;

  cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    vmwareDriverUnlock(driver);
    return ret;
}
Code Example #15
File: vmware_driver.c  Project: bigclouds/libvirt
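/* VMware: virDomainIsActive implementation returning
 * virDomainObjIsActive() for the looked-up domain object. */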
static int
vmwareDomainIsActive(virDomainPtr dom)
{
    struct vmware_driver *driver = dom->conn->privateData;
    virDomainObjPtr obj;
    int ret = -1;

    vmwareDriverLock(driver);
    obj = virDomainObjListFindByUUID(driver->domains, dom->uuid);
    vmwareDriverUnlock(driver);
    if (!obj) {
        virReportError(VIR_ERR_NO_DOMAIN, NULL);
        goto cleanup;
    }
    ret = virDomainObjIsActive(obj);

  cleanup:
    if (obj)
        virObjectUnlock(obj);
    return ret;
}
Code Example #16
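/* bhyve: older undefine variant without lifecycle events; the
 * active/inactive handling matches Code Example #9. */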
static int
bhyveDomainUndefine(virDomainPtr domain)
{
    bhyveConnPtr privconn = domain->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    if (!(vm = bhyveDomObjFromDomain(domain)))
        goto cleanup;

    if (virDomainUndefineEnsureACL(domain->conn, vm->def) < 0)
        goto cleanup;

    if (!vm->persistent) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("Cannot undefine transient domain"));
        goto cleanup;
    }

    if (virDomainDeleteConfig(BHYVE_CONFIG_DIR,
                              BHYVE_AUTOSTART_DIR,
                              vm) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        virDomainObjListRemove(privconn->domains, vm);
        vm = NULL;
    }

    ret = 0;

 cleanup:
    virObjectUnlock(vm);
    return ret;
}
Code Example #17
File: openvz_driver.c  Project: coolsnow77/libvirt
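/* OpenVZ: resume a paused domain via 'vzctl chkpnt --resume'.
 * virDomainObjIsActive() rejects the call when the domain is not running. */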
static int openvzDomainResume(virDomainPtr dom) {
  struct openvz_driver *driver = dom->conn->privateData;
  virDomainObjPtr vm;
  const char *prog[] = {VZCTL, "--quiet", "chkpnt", PROGRAM_SENTINAL, "--resume", NULL};
  int ret = -1;

  openvzDriverLock(driver);
  vm = virDomainFindByUUID(&driver->domains, dom->uuid);
  openvzDriverUnlock(driver);

  if (!vm) {
      openvzError(VIR_ERR_NO_DOMAIN, "%s",
                  _("no domain with matching uuid"));
      goto cleanup;
  }

  if (!virDomainObjIsActive(vm)) {
      openvzError(VIR_ERR_OPERATION_INVALID, "%s",
                  _("Domain is not running"));
      goto cleanup;
  }

  if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
      openvzSetProgramSentinal(prog, vm->def->name);
      if (virRun(prog, NULL) < 0) {
          goto cleanup;
      }
      virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_UNPAUSED);
  }

  ret = 0;

cleanup:
  if (vm)
      virDomainObjUnlock(vm);
  return ret;
}
Code Example #18
File: lightos_driver.c  Project: lzjqsdd/lightos
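/* lightos: start a defined domain; virDomainObjIsActive() rejects domains
 * that are already running. */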
static int
lightosDomainCreate(virDomainPtr dom)
{
    lightosConnPtr privconn = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    if (!(vm = lightosDomObjFromDomain(dom)))
        goto cleanup;

    if (virDomainCreateEnsureACL(dom->conn, vm->def) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("Domain is already running"));
        goto cleanup;
    }

    ret = virLightosProcessStart(dom->conn, privconn, vm,
                                 VIR_DOMAIN_RUNNING_BOOTED);

 cleanup:
    virObjectUnlock(vm);
    return ret;
}
Code Example #19
File: qemu_migration.c  Project: rbu/libvirt
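/* QEMU: prepare step of a direct migration on the destination host.
 * virDomainObjIsActive() is checked at the end before the fake incoming
 * migration job is recorded on the freshly started domain. */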
int
qemuMigrationPrepareDirect(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           const char *uri_in,
                           char **uri_out,
                           const char *dname,
                           const char *dom_xml)
{
    static int port = 0;
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    int this_port;
    char *hostname = NULL;
    char migrateFrom [64];
    const char *p;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers and return a URI of
     * "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
        if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;

        /* Get hostname */
        if ((hostname = virGetHostname(NULL)) == NULL)
            goto cleanup;

        if (STRPREFIX(hostname, "localhost")) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("hostname on destination resolved to localhost, but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
        if (internalret < 0) {
            virReportOOMError();
            goto cleanup;
        }
    } else {
        /* Check the URI starts with "tcp:".  We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!STRPREFIX (uri_in, "tcp:")) {
            qemuReportError (VIR_ERR_INVALID_ARG,
                             "%s", _("only tcp URIs are supported for KVM/QEMU migrations"));
            goto cleanup;
        }

        /* Get the port number. */
        p = strrchr (uri_in, ':');
        if (p == strchr(uri_in, ':')) {
            /* Generate a port */
            this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
            if (port == QEMUD_MIGRATION_NUM_PORTS)
                port = 0;

            /* Caller frees */
            if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
                virReportOOMError();
                goto cleanup;
            }

        } else {
            p++; /* definitely has a ':' in it, see above */
            this_port = virParseNumber (&p);
            if (this_port == -1 || p-uri_in != strlen (uri_in)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                "%s", _("URI ended with incorrect ':port'"));
                goto cleanup;
            }
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming tcp:0.0.0.0:port
     */
    snprintf (migrateFrom, sizeof (migrateFrom), "tcp:0.0.0.0:%d", this_port);
    if (qemuProcessStart(dconn, driver, vm, migrateFrom, true,
                         -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    qemuAuditDomainStart(vm, "migrated", true);
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    VIR_FREE(hostname);
    virDomainDefFree(def);
    if (ret != 0)
        VIR_FREE(*uri_out);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    return ret;
}
Code Example #20
File: lxc_process.c  Project: i-ninth/libvirt
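/* LXC: stop a container (variant that additionally handles a frozen,
 * i.e. paused, container by SIGKILLing and thawing its cgroup). */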
int virLXCProcessStop(virLXCDriverPtr driver,
                      virDomainObjPtr vm,
                      virDomainShutoffReason reason)
{
    int rc;
    virLXCDomainObjPrivatePtr priv;

    VIR_DEBUG("Stopping VM name=%s pid=%d reason=%d",
              vm->def->name, (int)vm->pid, (int)reason);
    if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("VM '%s' not active", vm->def->name);
        return 0;
    }

    priv = vm->privateData;

    if (vm->pid <= 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Invalid PID %d for container"), vm->pid);
        return -1;
    }

    virSecurityManagerRestoreAllLabel(driver->securityManager,
                                      vm->def, false);
    virSecurityManagerReleaseLabel(driver->securityManager, vm->def);
    /* Clear out dynamically assigned labels */
    if (vm->def->nseclabels &&
        vm->def->seclabels[0]->type == VIR_DOMAIN_SECLABEL_DYNAMIC) {
        VIR_FREE(vm->def->seclabels[0]->model);
        VIR_FREE(vm->def->seclabels[0]->label);
        VIR_FREE(vm->def->seclabels[0]->imagelabel);
    }

    /* If the LXC domain is suspended we send all processes a SIGKILL
     * and thaw them. Upon wakeup the process sees the pending signal
     * and dies immediately. It is guaranteed that priv->cgroup != NULL
     * here because the domain has already been suspended using the
     * freezer cgroup.
     */
    if (reason == VIR_DOMAIN_SHUTOFF_DESTROYED &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        if (virCgroupKillRecursive(priv->cgroup, SIGKILL) <= 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Unable to kill all processes"));
            return -1;
        }

        if (virCgroupSetFreezerState(priv->cgroup, "THAWED") < 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Unable to thaw all processes"));

            return -1;
        }

        goto cleanup;
    }

    if (priv->cgroup) {
        rc = virCgroupKillPainfully(priv->cgroup);
        if (rc < 0)
            return -1;
        if (rc > 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Some processes refused to die"));
            return -1;
        }
    } else {
        /* If cgroup doesn't exist, just try cleaning up the
         * libvirt_lxc process */
        if (virProcessKillPainfully(vm->pid, true) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Processes %d refused to die"), (int)vm->pid);
            return -1;
        }
    }

 cleanup:
    virLXCProcessCleanup(driver, vm, reason);

    return 0;
}
Code Example #21
File: qemu_migration.c  Project: rbu/libvirt
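/* QEMU: qemuMigrationWaitForCompletion polls migration progress and
 * re-checks virDomainObjIsActive() on every iteration because the guest
 * may quit while the job runs; qemuMigrationPrepareTunnel below is the
 * tunnelled variant of the prepare step. */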
int
qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
{
    int ret = -1;
    int status;
    unsigned long long memProcessed;
    unsigned long long memRemaining;
    unsigned long long memTotal;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
        struct timeval now;
        int rc;
        const char *job;

        switch (priv->jobActive) {
            case QEMU_JOB_MIGRATION_OUT:
                job = _("migration job");
                break;
            case QEMU_JOB_SAVE:
                job = _("domain save job");
                break;
            case QEMU_JOB_DUMP:
                job = _("domain core dump job");
                break;
            default:
                job = _("job");
        }


        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
            VIR_DEBUG0("Cancelling job at client request");
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorMigrateCancel(priv->mon);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0) {
                VIR_WARN0("Unable to cancel job");
            }
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
            VIR_DEBUG0("Pausing domain for non-live migration");
            if (qemuMigrationSetOffline(driver, vm) < 0)
                VIR_WARN0("Unable to pause domain");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
            unsigned long long ms = priv->jobSignalsData.migrateDowntime;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
            priv->jobSignalsData.migrateDowntime = 0;
            VIR_DEBUG("Setting migration downtime to %llums", ms);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationDowntime(priv->mon, ms);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration downtime");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
            unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
            priv->jobSignalsData.migrateBandwidth = 0;
            VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration speed");
        }

        /* Repeat check because the job signals might have caused
         * guest to die
         */
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        rc = qemuMonitorGetMigrationStatus(priv->mon,
                                           &status,
                                           &memProcessed,
                                           &memRemaining,
                                           &memTotal);
        qemuDomainObjExitMonitorWithDriver(driver, vm);

        if (rc < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            goto cleanup;
        }

        if (gettimeofday(&now, NULL) < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            virReportSystemError(errno, "%s",
                                 _("cannot get time of day"));
            goto cleanup;
        }
        priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;

        switch (status) {
        case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
            priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("is not active"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
            priv->jobInfo.dataTotal = memTotal;
            priv->jobInfo.dataRemaining = memRemaining;
            priv->jobInfo.dataProcessed = memProcessed;

            priv->jobInfo.memTotal = memTotal;
            priv->jobInfo.memRemaining = memRemaining;
            priv->jobInfo.memProcessed = memProcessed;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
            ret = 0;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("unexpectedly failed"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("canceled by client"));
            break;
        }

        virDomainObjUnlock(vm);
        qemuDriverUnlock(driver);

        nanosleep(&ts, NULL);

        qemuDriverLock(driver);
        virDomainObjLock(vm);
    }

cleanup:
    return ret;
}


/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           virStreamPtr st,
                           const char *dname,
                           const char *dom_xml)
{
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    int dataFD[2] = { -1, -1 };
    virBitmapPtr qemuCaps = NULL;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (pipe(dataFD) < 0 ||
        virSetCloseExec(dataFD[0]) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* check that this qemu version supports the interactive exec */
    if (qemuCapsExtractVersionInfo(vm->def->emulator, vm->def->os.arch,
                                   NULL, &qemuCaps) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Cannot determine QEMU argv syntax %s"),
                        vm->def->emulator);
        goto endjob;
    }
    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming stdin (which qemu_command might convert to exec:cat or fd:n)
     */
    internalret = qemuProcessStart(dconn, driver, vm, "stdin", true, dataFD[1],
                                   NULL, VIR_VM_OP_MIGRATE_IN_START);
    if (internalret < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (virFDStreamOpen(st, dataFD[0]) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, 0);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        virReportSystemError(errno, "%s",
                             _("cannot pass pipe for tunnelled migration"));
        goto endjob;
    }

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    qemuCapsFree(qemuCaps);
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}