Code example #1
File: logging.c  Project: amery/libvirt-vserver
/**
 * virLogSetDefaultPriority:
 * @priority: the default priority level
 *
 * Set the default priority level: any message with a priority equal to
 * or higher than this level will be logged, unless a more specific rule
 * was defined for the message's log category.
 *
 * Returns 0 if successful, -1 in case of error.
 */
int virLogSetDefaultPriority(int priority) {
    if ((priority < VIR_LOG_DEBUG) || (priority > VIR_LOG_ERROR)) {
        VIR_WARN0("Ignoring invalid log level setting.");
        return(-1);
    }
    if (!virLogInitialized)
        virLogStartup();
    virLogDefaultPriority = priority;
    return(0);
}
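For context, a minimal usage sketch (not part of the original file): a caller typically sets the default threshold once during startup. VIR_LOG_INFO is assumed here to be one of the priority constants defined alongside VIR_LOG_DEBUG and VIR_LOG_ERROR.

/* Hypothetical caller: raise the default threshold so that only
 * informational and more severe messages are emitted. */
if (virLogSetDefaultPriority(VIR_LOG_INFO) < 0) {
    /* The level was rejected; the previous default remains in effect. */
}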
Code example #2
File: lxc_controller.c  Project: rbu/libvirt
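/*
 * Drop all capabilities from the controller process; once the container
 * is up there is no further need for elevated privileges. When libcap-ng
 * support is not compiled in, this only emits a warning.
 */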
static int lxcControllerClearCapabilities(void)
{
#if HAVE_CAPNG
    int ret;

    capng_clear(CAPNG_SELECT_BOTH);

    if ((ret = capng_apply(CAPNG_SELECT_BOTH)) < 0) {
        lxcError(VIR_ERR_INTERNAL_ERROR,
                 _("failed to apply capabilities: %d"), ret);
        return -1;
    }
#else
    VIR_WARN0("libcap-ng support not compiled in, unable to clear capabilities");
#endif
    return 0;
}
Code example #3
File: lxc_container.c  Project: amery/libvirt-vserver
/*
 * This is running as the 'init' process inside the container.
 * It removes some capabilities that could be dangerous to the
 * host system, since they are not currently "containerized".
 */
static int lxcContainerDropCapabilities(void)
{
#if HAVE_CAPNG
    int ret;

    capng_get_caps_process();

    if ((ret = capng_updatev(CAPNG_DROP,
                             CAPNG_EFFECTIVE | CAPNG_PERMITTED |
                             CAPNG_INHERITABLE | CAPNG_BOUNDING_SET,
                             CAP_SYS_BOOT, /* No use of reboot */
                             CAP_SYS_MODULE, /* No kernel module loading */
                             CAP_SYS_TIME, /* No changing the clock */
                             CAP_AUDIT_CONTROL, /* No messing with auditing status */
                             CAP_MAC_ADMIN, /* No messing with LSM config */
                             -1 /* sentinel */)) < 0) {
        lxcError(VIR_ERR_INTERNAL_ERROR,
                 _("Failed to remove capabilities: %d"), ret);
        return -1;
    }

    if ((ret = capng_apply(CAPNG_SELECT_BOTH)) < 0) {
        lxcError(VIR_ERR_INTERNAL_ERROR,
                 _("Failed to apply capabilities: %d"), ret);
        return -1;
    }

    /* We do not need to call capng_lock() in this case. The bounding
     * set restriction will prevent processes in the container from
     * reacquiring sys_boot/module/time, etc., which is all that matters
     * for the container. Once inside the container it is fine for
     * SECURE_NOROOT / SECURE_NO_SETUID_FIXUP to be unmasked - they can
     * never escape the bounding set. */

#else
    VIR_WARN0("libcap-ng support not compiled in, unable to clear capabilities");
#endif
    return 0;
}
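As a side note (not from the original file): libcap-ng also provides capng_have_capability(), which can be used to double-check that a drop actually took effect. A minimal sketch, assuming the same HAVE_CAPNG build guard as above:

#include <stdio.h>
#include <cap-ng.h>              /* capng_* API */
#include <linux/capability.h>    /* CAP_SYS_BOOT */

/* Illustrative only: verify that CAP_SYS_BOOT is no longer in the
 * effective set after lxcContainerDropCapabilities() has run. */
static void lxcCheckSysBootDropped(void)
{
#if HAVE_CAPNG
    capng_get_caps_process();
    if (capng_have_capability(CAPNG_EFFECTIVE, CAP_SYS_BOOT))
        fprintf(stderr, "CAP_SYS_BOOT is unexpectedly still present\n");
#endif
}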
Code example #4
File: lxc_controller.c  Project: rbu/libvirt
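/*
 * Main body of the controller: create the control socketpair, apply the
 * container's resource limits, prepare a private /dev/pts when the guest
 * has its own root filesystem, allocate the container tty, start the
 * container, move the veth devices into it, drop the controller's own
 * capabilities and finally run the I/O multiplexing loop.
 */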
static int
lxcControllerRun(virDomainDefPtr def,
                 unsigned int nveths,
                 char **veths,
                 int monitor,
                 int client,
                 int appPty)
{
    int rc = -1;
    int control[2] = { -1, -1};
    int containerPty = -1;
    char *containerPtyPath = NULL;
    pid_t container = -1;
    virDomainFSDefPtr root;
    char *devpts = NULL;
    char *devptmx = NULL;

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, control) < 0) {
        virReportSystemError(errno, "%s",
                             _("sockpair failed"));
        goto cleanup;
    }

    root = virDomainGetRootFilesystem(def);

    if (lxcSetContainerResources(def) < 0)
        goto cleanup;

    /*
     * If doing a chroot style setup, we need to prepare
     * a private /dev/pts for the child now, which they
     * will later move into position.
     *
     * This is complex because 'virsh console' needs to
     * use /dev/pts from the host OS, and the guest OS
     * needs to use /dev/pts from the guest.
     *
     * This means that we (libvirt_lxc) need to see and
     * use both /dev/pts instances. We're running in the
     * host OS context though and don't want to expose
     * the guest OS /dev/pts there.
     *
     * Thus we call unshare(CLONE_NEWNS) so that we can see
     * the guest's new /dev/pts, without it becoming
     * visible to the host OS. We also put the root FS
     * into slave mode, just in case it was currently
     * marked as shared.
     */
    if (root) {
        VIR_DEBUG0("Setting up private /dev/pts");
        if (unshare(CLONE_NEWNS) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Cannot unshare mount namespace"));
            goto cleanup;
        }

        if (mount("", "/", NULL, MS_SLAVE|MS_REC, NULL) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Failed to switch root mount into slave mode"));
            goto cleanup;
        }

        if (virAsprintf(&devpts, "%s/dev/pts", root->src) < 0 ||
            virAsprintf(&devptmx, "%s/dev/pts/ptmx", root->src) < 0) {
            virReportOOMError();
            goto cleanup;
        }

        if (virFileMakePath(devpts) != 0) {
            virReportSystemError(errno,
                                 _("Failed to make path %s"),
                                 devpts);
            goto cleanup;
        }

        VIR_DEBUG("Mouting 'devpts' on %s", devpts);
        if (mount("devpts", devpts, "devpts", 0,
                  "newinstance,ptmxmode=0666,mode=0620,gid=5") < 0) {
            virReportSystemError(errno,
                                 _("Failed to mount devpts on %s"),
                                 devpts);
            goto cleanup;
        }

        if (access(devptmx, R_OK) < 0) {
            VIR_WARN0("Kernel does not support private devpts, using shared devpts");
            VIR_FREE(devptmx);
        }
    }

    if (devptmx) {
        VIR_DEBUG("Opening tty on private %s", devptmx);
        if (virFileOpenTtyAt(devptmx,
                             &containerPty,
                             &containerPtyPath,
                             0) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Failed to allocate tty"));
            goto cleanup;
        }
    } else {
        VIR_DEBUG0("Opening tty on shared /dev/ptmx");
        if (virFileOpenTty(&containerPty,
                           &containerPtyPath,
                           0) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Failed to allocate tty"));
            goto cleanup;
        }
    }

    if (lxcSetPersonality(def) < 0)
        goto cleanup;

    if ((container = lxcContainerStart(def,
                                       nveths,
                                       veths,
                                       control[1],
                                       containerPtyPath)) < 0)
        goto cleanup;
    VIR_FORCE_CLOSE(control[1]);

    if (lxcControllerMoveInterfaces(nveths, veths, container) < 0)
        goto cleanup;

    if (lxcContainerSendContinue(control[0]) < 0)
        goto cleanup;

    /* Now that the container is running, there's no need for us to keep
       any elevated capabilities */
    if (lxcControllerClearCapabilities() < 0)
        goto cleanup;

    rc = lxcControllerMain(monitor, client, appPty, containerPty, container);

cleanup:
    VIR_FREE(devptmx);
    VIR_FREE(devpts);
    VIR_FORCE_CLOSE(control[0]);
    VIR_FORCE_CLOSE(control[1]);
    VIR_FREE(containerPtyPath);
    VIR_FORCE_CLOSE(containerPty);

    if (container > 1) {
        int status;
        kill(container, SIGTERM);
        if (!(waitpid(container, &status, WNOHANG) == 0 &&
            WIFEXITED(status)))
            kill(container, SIGKILL);
        waitpid(container, NULL, 0);
    }
    return rc;
}
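The unshare/remount step described in the comment inside lxcControllerRun is worth seeing in isolation. Below is a stripped-down sketch (not part of libvirt) of just that mechanism; it assumes a Linux host with _GNU_SOURCE and reduces error handling to perror():

#define _GNU_SOURCE
#include <sched.h>      /* unshare, CLONE_NEWNS */
#include <sys/mount.h>  /* mount, MS_SLAVE, MS_REC */
#include <stdio.h>

/* Illustrative only: give the calling process its own mount namespace and
 * make every existing mount a slave, so mounts created afterwards (such
 * as a private devpts) stay invisible to the host's namespace. */
static int enterPrivateMountNamespace(void)
{
    if (unshare(CLONE_NEWNS) < 0) {
        perror("unshare(CLONE_NEWNS)");
        return -1;
    }
    if (mount("", "/", NULL, MS_SLAVE | MS_REC, NULL) < 0) {
        perror("mount MS_SLAVE|MS_REC");
        return -1;
    }
    return 0;
}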
Code example #5
File: qemu_migration.c  Project: rbu/libvirt
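/*
 * Poll the QEMU monitor until the current unbounded job (migration, save
 * or core dump) completes, fails or is cancelled. Each 50ms iteration
 * also services pending job signals (cancel, suspend, downtime,
 * bandwidth) raised by other API calls and refreshes priv->jobInfo with
 * the latest progress figures.
 */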
int
qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
{
    int ret = -1;
    int status;
    unsigned long long memProcessed;
    unsigned long long memRemaining;
    unsigned long long memTotal;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
        struct timeval now;
        int rc;
        const char *job;

        switch (priv->jobActive) {
            case QEMU_JOB_MIGRATION_OUT:
                job = _("migration job");
                break;
            case QEMU_JOB_SAVE:
                job = _("domain save job");
                break;
            case QEMU_JOB_DUMP:
                job = _("domain core dump job");
                break;
            default:
                job = _("job");
        }


        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
            VIR_DEBUG0("Cancelling job at client request");
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorMigrateCancel(priv->mon);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0) {
                VIR_WARN0("Unable to cancel job");
            }
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
            priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
            VIR_DEBUG0("Pausing domain for non-live migration");
            if (qemuMigrationSetOffline(driver, vm) < 0)
                VIR_WARN0("Unable to pause domain");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
            unsigned long long ms = priv->jobSignalsData.migrateDowntime;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
            priv->jobSignalsData.migrateDowntime = 0;
            VIR_DEBUG("Setting migration downtime to %llums", ms);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationDowntime(priv->mon, ms);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration downtime");
        } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
            unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth;

            priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
            priv->jobSignalsData.migrateBandwidth = 0;
            VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0)
                VIR_WARN0("Unable to set migration speed");
        }

        /* Repeat the check, because handling the job signals might have
         * caused the guest to die
         */
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                            job, _("guest unexpectedly quit"));
            goto cleanup;
        }

        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        rc = qemuMonitorGetMigrationStatus(priv->mon,
                                           &status,
                                           &memProcessed,
                                           &memRemaining,
                                           &memTotal);
        qemuDomainObjExitMonitorWithDriver(driver, vm);

        if (rc < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            goto cleanup;
        }

        if (gettimeofday(&now, NULL) < 0) {
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            virReportSystemError(errno, "%s",
                                 _("cannot get time of day"));
            goto cleanup;
        }
        priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;

        switch (status) {
        case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
            priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("is not active"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
            priv->jobInfo.dataTotal = memTotal;
            priv->jobInfo.dataRemaining = memRemaining;
            priv->jobInfo.dataProcessed = memProcessed;

            priv->jobInfo.memTotal = memTotal;
            priv->jobInfo.memRemaining = memRemaining;
            priv->jobInfo.memProcessed = memProcessed;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
            ret = 0;
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
            priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("unexpectedly failed"));
            break;

        case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
            priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("%s: %s"), job, _("canceled by client"));
            break;
        }

        virDomainObjUnlock(vm);
        qemuDriverUnlock(driver);

        nanosleep(&ts, NULL);

        qemuDriverLock(driver);
        virDomainObjLock(vm);
    }

cleanup:
    return ret;
}


/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM whose incoming migration data is fed
 * over a pipe ("-incoming stdin"), and sets up the corresponding
 * virStream to handle that data.
 */
int
qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           virStreamPtr st,
                           const char *dname,
                           const char *dom_xml)
{
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    int dataFD[2] = { -1, -1 };
    virBitmapPtr qemuCaps = NULL;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (pipe(dataFD) < 0 ||
        virSetCloseExec(dataFD[0]) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* work out which command line syntax this emulator binary expects */
    if (qemuCapsExtractVersionInfo(vm->def->emulator, vm->def->os.arch,
                                   NULL, &qemuCaps) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Cannot determine QEMU argv syntax %s"),
                        vm->def->emulator);
        goto endjob;
    }
    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming stdin (which qemu_command might convert to exec:cat or fd:n)
     */
    internalret = qemuProcessStart(dconn, driver, vm, "stdin", true, dataFD[1],
                                   NULL, VIR_VM_OP_MIGRATE_IN_START);
    if (internalret < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (virFDStreamOpen(st, dataFD[0]) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, 0);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        virReportSystemError(errno, "%s",
                             _("cannot pass pipe for tunnelled migration"));
        goto endjob;
    }

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs from being invoked while the incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    qemuCapsFree(qemuCaps);
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}