Example #1
//!
//! Defines the thread that does the actual migration of an instance off the source.
//!
//! @param[in] arg a transparent pointer to the ncInstance being migrated
//!
//! @return Always returns NULL
//!
static void *migrating_thread(void *arg)
{
    ncInstance *instance = ((ncInstance *) arg);
    virDomainPtr dom = NULL;
    virConnectPtr conn = NULL;
    virConnectPtr dconn = NULL;        // connection to the destination hypervisor
    int migration_error = 0;

    LOGTRACE("invoked for %s\n", instance->instanceId);

    // Take the (serialized) connection to the local hypervisor.
    if ((conn = lock_hypervisor_conn()) == NULL) {
        LOGERROR("[%s] cannot migrate instance %s (failed to connect to hypervisor), giving up and rolling back.\n", instance->instanceId, instance->instanceId);
        migration_error++;
        goto out;
    } else {
        LOGTRACE("[%s] connected to hypervisor\n", instance->instanceId);
    }

    dom = virDomainLookupByName(conn, instance->instanceId);
    if (dom == NULL) {
        LOGERROR("[%s] cannot migrate instance %s (failed to find domain), giving up and rolling back.\n", instance->instanceId, instance->instanceId);
        migration_error++;
        goto out;
    }

    // Connect to the destination hypervisor: try TLS first, fall back to SSH.
    char duri[1024];
    snprintf(duri, sizeof(duri), "qemu+tls://%s/system", instance->migration_dst);

    LOGDEBUG("[%s] connecting to remote hypervisor at '%s'\n", instance->instanceId, duri);
    dconn = virConnectOpen(duri);
    if (dconn == NULL) {
        LOGWARN("[%s] cannot migrate instance using TLS (failed to connect to remote), retrying using SSH.\n", instance->instanceId);
        snprintf(duri, sizeof(duri), "qemu+ssh://%s/system", instance->migration_dst);
        LOGDEBUG("[%s] connecting to remote hypervisor at '%s'\n", instance->instanceId, duri);
        dconn = virConnectOpen(duri);
        if (dconn == NULL) {
            LOGERROR("[%s] cannot migrate instance using TLS or SSH (failed to connect to remote), giving up and rolling back.\n", instance->instanceId);
            migration_error++;
            goto out;
        }
    }

    LOGINFO("[%s] migrating instance\n", instance->instanceId);
    virDomainPtr ddom = virDomainMigrate(dom,
                                         dconn,
                                         VIR_MIGRATE_LIVE | VIR_MIGRATE_NON_SHARED_DISK,
                                         NULL,    // new name on destination (optional)
                                         NULL,    // destination URI as seen from source (optional)
                                         0L);     // bandwidth limitation (0 => unlimited)
    if (ddom == NULL) {
        LOGERROR("[%s] cannot migrate instance, giving up and rolling back.\n", instance->instanceId);
        migration_error++;
        goto out;
    }
    LOGINFO("[%s] instance migrated\n", instance->instanceId);
    virDomainFree(ddom);

out:
    // BUG FIX: dconn was previously closed only on the success path and
    // leaked when virDomainMigrate failed; close it on every exit path.
    if (dconn != NULL)
        virConnectClose(dconn);

    if (dom != NULL)
        virDomainFree(dom);

    if (conn != NULL)
        unlock_hypervisor_conn();

    sem_p(inst_sem);
    LOGDEBUG("%d outgoing migrations still active\n", --outgoing_migrations_in_progress);
    if (migration_error) {
        migration_rollback(instance);
    } else {
        // If this is set to NOT_MIGRATING here, it's briefly possible for
        // both the source and destination nodes to report the same instance
        // as Extant/NOT_MIGRATING, which is confusing!
        instance->migration_state = MIGRATION_CLEANING;
        save_instance_struct(instance);
        copy_instances();
    }
    sem_v(inst_sem);

    LOGDEBUG("done\n");
    unset_corrid(get_corrid());
    return NULL;
}
Example #2
//!
//! Live-migrates the instance 'instanceId' to the hypervisor on host 'target'
//! and, on success, removes the instance from global_instances.
//!
//! @param[in] nc         node-controller state (used to build the remote URI)
//! @param[in] meta       request metadata (unused here, kept for the handler signature)
//! @param[in] instanceId identifier of the instance (also the libvirt domain name)
//! @param[in] target     hostname of the destination node; must be non-NULL and non-empty
//!
//! @return OK on success, NOT_FOUND if the instance is unknown, ERROR otherwise
//!
static int doMigrateInstance(struct nc_state_t *nc, ncMetadata *meta, char *instanceId, char *target) 
{
  int ret = OK;
  char remoteURI[CHAR_BUFFER_SIZE];
  virConnectPtr *conn = NULL;
  virConnectPtr dst = NULL;
  ncInstance *instance = NULL;

  logprintfl(EUCADEBUG, "doMigrateInstance() in default handler invoked\n");

  // BUG FIX: validate the target up front. The original condition
  // 'target || !strcmp(target, "")' accepted an empty target and, when
  // target was NULL, called strcmp(NULL, "") — undefined behavior.
  if (target == NULL || !strcmp(target, "")) {
    logprintfl(EUCAERROR, "doMigrateInstance(): no migration target\n");
    return (ERROR);
  }

  conn = check_hypervisor_conn();
  if (!conn) {
    // BUG FIX: previously this only logged and then proceeded to use the
    // dead connection; bail out instead.
    logprintfl(EUCAERROR, "doMigrateInstance() cannot connect to hypervisor\n");
    return (ERROR);
  }

  getRemoteURI(nc, target, remoteURI, CHAR_BUFFER_SIZE);
  logprintfl(EUCADEBUG, "doMigrateInstance(): connecting to remote hypervisor\n");
  dst = virConnectOpen(remoteURI);
  if (!dst) {
    logprintfl(EUCAERROR, "doMigrateInstance(): Connection to remote Hypervisor failed (URI: %s)\n", remoteURI);
    return (ERROR);
  }
  logprintfl(EUCADEBUG, "doMigrateInstance(): Connected to %s\n", remoteURI);

  sem_p (inst_sem); 
  instance = find_instance(&global_instances, instanceId);
  sem_v (inst_sem);
  if (instance == NULL) {
    logprintfl(EUCAERROR, "doMigrateInstance(): instance not found\n");
    virConnectClose(dst);                    // BUG FIX: don't leak the remote connection
    return (NOT_FOUND);
  }

  sem_p(hyp_sem);
  virDomainPtr dom = virDomainLookupByName(*conn, instanceId);
  sem_v(hyp_sem);

  if (dom) {
    sem_p (hyp_sem);
    // virDomainMigrate returns a handle to the domain on the destination
    // (NULL on failure); that handle must be freed by the caller.
    virDomainPtr ddom = virDomainMigrate (dom, dst, VIR_MIGRATE_LIVE, NULL, NULL, 0);
    if (ddom) {
      logprintfl (EUCAINFO, "doMigrateInstance(): migrated instance %s\n", instanceId);
      virDomainFree(ddom);                   // BUG FIX: free the migrated-domain handle
    } else {
      ret = ERROR;
    }
    virDomainFree(dom);                      // BUG FIX: free the looked-up source domain
    sem_v (hyp_sem);
  } else {
    logprintfl (EUCAWARN, "warning: domain %s to be migrated not running on hypervisor\n", instanceId);
    ret = ERROR;
  }

  virConnectClose(dst);                      // BUG FIX: close the remote connection on all paths

  if (ret == OK) {
    sem_p (inst_sem); 
    instance = find_instance(&global_instances, instanceId);
    logprintfl(EUCADEBUG, "doMigrateInstance(): removing instance from global_instances\n");
    if (remove_instance (&global_instances, instance) != OK) {
      logprintfl(EUCAERROR, "doMigrateInstance(): cannot remove instance from global_instances\n");
      ret = ERROR;
    }
    sem_v (inst_sem);
  }

  return (ret);
}