/* Allocate a migration cookie describing the local end of a migration:
 * the guest name (the original name if the guest was renamed on a
 * previous migration), its UUID, the local hostname and the local host
 * UUID.  Returns NULL with an error reported on failure. */
static qemuMigrationCookiePtr qemuMigrationCookieNew(virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;
    qemuMigrationCookiePtr mig = NULL;
    /* Prefer the pre-rename name so the destination sees a stable identity */
    const char *name = priv->origname ? priv->origname : dom->def->name;

    if (VIR_ALLOC(mig) < 0)
        goto error;

    if (VIR_STRDUP(mig->name, name) < 0)
        goto error;

    memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);

    if (!(mig->localHostname = virGetHostname()))
        goto error;

    if (virGetHostUUID(mig->localHostuuid) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Unable to obtain host UUID"));
        goto error;
    }

    return mig;

 error:
    qemuMigrationCookieFree(mig);
    return NULL;
}
/* Return the host's hostname (caller frees), or NULL if the ACL check
 * denies access or the hostname cannot be determined. */
static char *
bhyveConnectGetHostname(virConnectPtr conn ATTRIBUTE_UNUSED)
{
    char *hostname = NULL;

    /* ACL check first; errors are reported by the ACL helper */
    if (virConnectGetHostnameEnsureACL(conn) >= 0)
        hostname = virGetHostname();

    return hostname;
}
/* Allocate a daemonConfig populated with the built-in defaults that apply
 * before any configuration file is read.  @privileged selects the default
 * UNIX-socket auth scheme (PolicyKit when built with it and running as
 * root, otherwise no auth).  Returns NULL with an OOM error reported on
 * allocation failure; caller frees with daemonConfigFree(). */
struct daemonConfig* daemonConfigNew(bool privileged ATTRIBUTE_UNUSED)
{
    struct daemonConfig *data;
    char *localhost;
    int ret;

    if (VIR_ALLOC(data) < 0) {
        virReportOOMError();
        return NULL;
    }

    /* TLS listener on by default; plain TCP off by default */
    data->listen_tls = 1;
    data->listen_tcp = 0;

    if (!(data->tls_port = strdup(LIBVIRTD_TLS_PORT)))
        goto no_memory;
    if (!(data->tcp_port = strdup(LIBVIRTD_TCP_PORT)))
        goto no_memory;

    /* Only default to PolicyKit if running as root */
#if HAVE_POLKIT
    if (privileged) {
        data->auth_unix_rw = REMOTE_AUTH_POLKIT;
        data->auth_unix_ro = REMOTE_AUTH_POLKIT;
    } else {
#endif
        data->auth_unix_rw = REMOTE_AUTH_NONE;
        data->auth_unix_ro = REMOTE_AUTH_NONE;
#if HAVE_POLKIT
    }
#endif

    /* With PolicyKit doing auth the socket itself can be world-accessible;
     * otherwise restrict the read-write socket to the owner */
    if (data->auth_unix_rw == REMOTE_AUTH_POLKIT)
        data->unix_sock_rw_perms = strdup("0777"); /* Allow world */
    else
        data->unix_sock_rw_perms = strdup("0700"); /* Allow user only */
    data->unix_sock_ro_perms = strdup("0777"); /* Always allow world */
    if (!data->unix_sock_ro_perms ||
        !data->unix_sock_rw_perms)
        goto no_memory;

#if HAVE_SASL
    data->auth_tcp = REMOTE_AUTH_SASL;
#else
    data->auth_tcp = REMOTE_AUTH_NONE;
#endif
    data->auth_tls = REMOTE_AUTH_NONE;

    data->mdns_adv = 0;

    /* Worker-pool and request-throttling defaults */
    data->min_workers = 5;
    data->max_workers = 20;
    data->max_clients = 20;

    data->prio_workers = 5;

    data->max_requests = 20;
    data->max_client_requests = 5;

    data->log_buffer_size = 64;

    data->audit_level = 1;
    data->audit_logging = 0;

    data->keepalive_interval = 5;
    data->keepalive_count = 5;
    data->keepalive_required = 0;

    localhost = virGetHostname(NULL);
    if (localhost == NULL) {
        /* we couldn't resolve the hostname; assume that we are
         * running in disconnected operation, and report a less
         * useful Avahi string */
        ret = virAsprintf(&data->mdns_name, "Virtualization Host");
    } else {
        char *tmp;

        /* Extract the host part of the potentially FQDN */
        if ((tmp = strchr(localhost, '.')))
            *tmp = '\0';
        ret = virAsprintf(&data->mdns_name, "Virtualization Host %s",
                          localhost);
    }
    VIR_FREE(localhost);
    if (ret < 0)
        goto no_memory;

    return data;

 no_memory:
    virReportOOMError();
    daemonConfigFree(data);
    return NULL;
}
/*
 * Prepare the destination side of a libxl migration: register the
 * incoming domain definition, determine the migration URI (generating
 * one from the local hostname and a port from the migration port
 * allocator when @uri_in is NULL, otherwise parsing/normalizing
 * @uri_in), then listen on TCP socket(s) and arm event-loop callbacks
 * that will receive the migration stream.
 *
 * On success returns 0 with *uri_out set (caller frees) to the
 * "tcp://host:port" URI the source should connect to; returns -1 with
 * an error reported on failure.
 *
 * NOTE(review): on failure the domain added by virDomainObjListAdd is
 * not removed from the list here — presumably the caller/driver deals
 * with it; confirm against the callers.
 */
int
libxlDomainMigrationPrepare(virConnectPtr dconn,
                            virDomainDefPtr def,
                            const char *uri_in,
                            char **uri_out,
                            unsigned int flags)
{
    libxlDriverPrivatePtr driver = dconn->privateData;
    virDomainObjPtr vm = NULL;
    char *hostname = NULL;      /* always heap-owned by us; freed at 'done' */
    unsigned short port;
    char portstr[100];
    virURIPtr uri = NULL;
    virNetSocketPtr *socks = NULL;
    size_t nsocks = 0;
    int nsocks_listen = 0;
    libxlMigrationDstArgs *args;
    size_t i;
    int ret = -1;

    if (!(vm = virDomainObjListAdd(driver->domains, def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;

    /* Create socket connection to receive migration data */
    if (!uri_in) {
        if ((hostname = virGetHostname()) == NULL)
            goto cleanup;

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (virAsprintf(uri_out, "tcp://%s:%d", hostname, port) < 0)
            goto cleanup;
    } else {
        if (!(STRPREFIX(uri_in, "tcp://"))) {
            /* not full URI, add prefix tcp:// */
            char *tmp;
            if (virAsprintf(&tmp, "tcp://%s", uri_in) < 0)
                goto cleanup;
            uri = virURIParse(tmp);
            VIR_FREE(tmp);
        } else {
            uri = virURIParse(uri_in);
        }

        if (uri == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unable to parse URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("missing host in migration URI: %s"),
                           uri_in);
            goto cleanup;
        }

        /* Duplicate the server name so hostname is unconditionally owned
         * by this function.  Previously hostname aliased uri->server here,
         * which made an unconditional free impossible and leaked the
         * virGetHostname() result allocated in the !uri_in branch. */
        if (VIR_STRDUP(hostname, uri->server) < 0)
            goto cleanup;

        if (uri->port == 0) {
            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto cleanup;
        } else {
            port = uri->port;
        }

        if (virAsprintf(uri_out, "tcp://%s:%d", hostname, port) < 0)
            goto cleanup;
    }

    snprintf(portstr, sizeof(portstr), "%d", port);

    if (virNetSocketNewListenTCP(hostname, portstr, &socks, &nsocks) < 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Fail to create socket for incoming migration"));
        goto cleanup;
    }

    if (libxlMigrationDstArgsInitialize() < 0)
        goto cleanup;

    if (!(args = virObjectNew(libxlMigrationDstArgsClass)))
        goto cleanup;

    args->conn = dconn;
    args->vm = vm;
    args->flags = flags;
    args->socks = socks;
    args->nsocks = nsocks;

    for (i = 0; i < nsocks; i++) {
        if (virNetSocketSetBlocking(socks[i], true) < 0)
            continue;
        if (virNetSocketListen(socks[i], 1) < 0)
            continue;
        if (virNetSocketAddIOCallback(socks[i],
                                      VIR_EVENT_HANDLE_READABLE,
                                      libxlDoMigrateReceive,
                                      args,
                                      virObjectFreeCallback) < 0)
            continue;

        /*
         * Successfully added sock to event loop.  Take a ref on args to
         * ensure it is not freed until sock is removed from the event loop.
         * Ref is dropped in virObjectFreeCallback after being removed
         * from the event loop.
         */
        virObjectRef(args);
        nsocks_listen++;
    }

    /* Done with args in this function, drop reference */
    virObjectUnref(args);

    /* At least one socket must be armed for the migration to proceed */
    if (!nsocks_listen)
        goto cleanup;

    ret = 0;
    goto done;

 cleanup:
    for (i = 0; i < nsocks; i++) {
        virNetSocketClose(socks[i]);
        virObjectUnref(socks[i]);
    }

 done:
    VIR_FREE(hostname);  /* fix: previously leaked in the !uri_in branch */
    virURIFree(uri);
    if (vm)
        virObjectUnlock(vm);
    return ret;
}
/* Allocate a daemonConfig populated with the built-in defaults that apply
 * before any configuration file is read.  @privileged selects the default
 * UNIX-socket auth scheme (PolicyKit when built with it and running as
 * root, otherwise no auth).  Returns NULL on allocation failure (VIR_ALLOC
 * and VIR_STRDUP report the error); caller frees with daemonConfigFree(). */
struct daemonConfig*
daemonConfigNew(bool privileged ATTRIBUTE_UNUSED)
{
    struct daemonConfig *data;
    char *localhost;
    int ret;

    if (VIR_ALLOC(data) < 0)
        return NULL;

    /* TLS listener on by default; plain TCP off by default */
    data->listen_tls = 1;
    data->listen_tcp = 0;

    if (VIR_STRDUP(data->tls_port, LIBVIRTD_TLS_PORT) < 0 ||
        VIR_STRDUP(data->tcp_port, LIBVIRTD_TCP_PORT) < 0)
        goto error;

    /* Only default to PolicyKit if running as root */
#if WITH_POLKIT
    if (privileged) {
        data->auth_unix_rw = REMOTE_AUTH_POLKIT;
        data->auth_unix_ro = REMOTE_AUTH_POLKIT;
    } else {
#endif
        data->auth_unix_rw = REMOTE_AUTH_NONE;
        data->auth_unix_ro = REMOTE_AUTH_NONE;
#if WITH_POLKIT
    }
#endif

    /* With PolicyKit doing auth the rw socket can be world-accessible;
     * otherwise restrict it to the owner.  The ro socket is always world
     * accessible, the admin socket owner-only. */
    if (VIR_STRDUP(data->unix_sock_rw_perms,
                   data->auth_unix_rw == REMOTE_AUTH_POLKIT ? "0777" : "0700") < 0 ||
        VIR_STRDUP(data->unix_sock_ro_perms, "0777") < 0 ||
        VIR_STRDUP(data->unix_sock_admin_perms, "0700") < 0)
        goto error;

#if WITH_SASL
    data->auth_tcp = REMOTE_AUTH_SASL;
#else
    data->auth_tcp = REMOTE_AUTH_NONE;
#endif
    data->auth_tls = REMOTE_AUTH_NONE;

    data->mdns_adv = 0;

    /* Worker-pool and request-throttling defaults for the main server */
    data->min_workers = 5;
    data->max_workers = 20;
    data->max_clients = 5000;
    data->max_queued_clients = 1000;
    data->max_anonymous_clients = 20;

    data->prio_workers = 5;

    data->max_requests = 20;
    data->max_client_requests = 5;

    data->audit_level = 1;
    data->audit_logging = 0;

    data->keepalive_interval = 5;
    data->keepalive_count = 5;

    /* Corresponding defaults for the admin server */
    data->admin_min_workers = 5;
    data->admin_max_workers = 20;
    data->admin_max_clients = 5000;
    data->admin_max_queued_clients = 20;
    data->admin_max_client_requests = 5;

    data->admin_keepalive_interval = 5;
    data->admin_keepalive_count = 5;

    localhost = virGetHostname();
    if (localhost == NULL) {
        /* we couldn't resolve the hostname; assume that we are
         * running in disconnected operation, and report a less
         * useful Avahi string */
        ret = VIR_STRDUP(data->mdns_name, "Virtualization Host");
    } else {
        char *tmp;

        /* Extract the host part of the potentially FQDN */
        if ((tmp = strchr(localhost, '.')))
            *tmp = '\0';
        ret = virAsprintf(&data->mdns_name, "Virtualization Host %s",
                          localhost);
    }
    VIR_FREE(localhost);
    if (ret < 0)
        goto error;

    return data;

 error:
    daemonConfigFree(data);
    return NULL;
}
/* Prepare the destination side of a direct (peer-to-peer tcp:) QEMU
 * migration: pick or parse the migration port, parse the incoming domain
 * XML (optionally renaming it to @dname), register the domain, and start
 * the QEMU process with "-incoming tcp:0.0.0.0:port".  On success a fake
 * incoming-migration job is left active until finish() runs.
 *
 * Returns 0 on success with *uri_out set (caller frees) when a URI was
 * generated; -1 on failure (any partially-set *uri_out is freed).
 *
 * NOTE(review): the static 'port' round-robin counter is unsynchronized;
 * presumably callers hold the driver lock — confirm.
 */
int qemuMigrationPrepareDirect(struct qemud_driver *driver,
                               virConnectPtr dconn,
                               const char *uri_in,
                               char **uri_out,
                               const char *dname,
                               const char *dom_xml)
{
    static int port = 0;                /* round-robin offset into the migration port range */
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    int this_port;
    char *hostname = NULL;
    char migrateFrom [64];
    const char *p;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers and return a URI of
     * "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
        if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;

        /* Get hostname */
        if ((hostname = virGetHostname(NULL)) == NULL)
            goto cleanup;

        /* A hostname resolving to localhost would make the source connect
         * to itself, so reject it outright */
        if (STRPREFIX(hostname, "localhost")) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("hostname on destination resolved to localhost, but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatability with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
        if (internalret < 0) {
            virReportOOMError();
            goto cleanup;
        }
    } else {
        /* Check the URI starts with "tcp:".  We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!STRPREFIX (uri_in, "tcp:")) {
            qemuReportError (VIR_ERR_INVALID_ARG, "%s",
                             _("only tcp URIs are supported for KVM/QEMU migrations"));
            goto cleanup;
        }

        /* Get the port number. */
        p = strrchr (uri_in, ':');
        if (p == strchr(uri_in, ':')) {
            /* The only ':' is the one in "tcp:", i.e. no port was given;
             * Generate a port */
            this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
            if (port == QEMUD_MIGRATION_NUM_PORTS)
                port = 0;

            /* Caller frees */
            if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
                virReportOOMError();
                goto cleanup;
            }

        } else {
            p++; /* definitely has a ':' in it, see above */
            this_port = virParseNumber (&p);
            /* Require the number to run exactly to the end of the URI */
            if (this_port == -1 || p-uri_in != strlen (uri_in)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                "%s", _("URI ended with incorrect ':port'"));
                goto cleanup;
            }
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        /* NOTE(review): failure path here does not call virReportOOMError,
         * unlike the other OOM paths in this function — confirm intended */
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;                 /* ownership moved into vm */
    priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming tcp:0.0.0.0:port
     */
    snprintf (migrateFrom, sizeof (migrateFrom), "tcp:0.0.0.0:%d", this_port);
    if (qemuProcessStart(dconn, driver, vm, migrateFrom, true,
                         -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            /* Transient domain that failed to start: drop it entirely */
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    VIR_FREE(hostname);
    virDomainDefFree(def);
    /* Don't hand back a half-built URI on failure */
    if (ret != 0)
        VIR_FREE(*uri_out);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    return ret;
}