Example #1
0
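/* Install an already-expanded SBCL distribution: point SBCL_HOME and
 * INSTALL_ROOT at the target directory under configdir(), verify that GNU
 * make is available via find-gnumake.sh, then run install.sh with its output
 * redirected to install.log. */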
int sbcl_bin_install(struct install_options* param) {
  int ret;
  char* home=configdir();
  char* impl=param->impl;
  char* version=param->version;
  char* impl_path= cat(home,impldir(param->arch,param->os,impl,version),NULL);
  char* src=param->expand_path;
  char* sbcl_home=cat(impl_path,"/lib/sbcl",NULL);
  char* install_root=q(impl_path);
  char* log_path=cat(home,"impls/log/",impl,"-",version,"/install.log",NULL);
  cond_printf(0,"Building %s/%s...",impl,version);
  ensure_directories_exist(impl_path);
  ensure_directories_exist(log_path);
  change_directory(src);
  setenv("SBCL_HOME",sbcl_home,1);
  setenv("INSTALL_ROOT",install_root,1);
  ret=System("(cat find-gnumake.sh; echo find_gnumake)|sh");
  if(ret!=0) {
    fprintf(stderr,"'make' command not available.\n");
    return 0;
  }
  ret=1;
  if(system_redirect("sh install.sh",log_path)==-1)
    ret=0;
  s(home),s(impl_path),s(sbcl_home),s(install_root),s(log_path);
  printf(" Done.\n");
  return ret;
}
Example #2
0
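/* Common pre-install step: create local-projects/ under configdir(), refuse
 * to continue if this impl/version is already installed or another install
 * is in progress, then prepare a per-install tmp directory and a lock file
 * that is deleted at exit. */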
int start(struct install_options* param) {
  char *home=configdir(),*p;
  char *localprojects=cat(home,"local-projects/",NULL);
  setup_uid(1);
  ensure_directories_exist(localprojects);
  s(localprojects);
  if(installed_p(param)) {
    printf("%s/%s is already installed. Try (TBD) for the forced re-installation.\n",param->impl,param->version?param->version:"");
    return 0;
  }
  if(install_running_p(param)) {
    printf("It seems another installation process for $1/$2 is in progress somewhere in the system.\n");
    return 0;
  }
  p=cat(home,"tmp",SLASH,param->impl,param->version?"-":"",param->version?param->version:"",SLASH,NULL);
  ensure_directories_exist(p);
  s(p);

  p=cat(home,"tmp",SLASH,param->impl,param->version?"-":"",param->version?param->version:"",".lock",NULL);
  delete_at_exit(p);
  touch(p);

  s(p),s(home);
  return 1;
}
Example #3
0
int init_backing_store (const char * conf_instances_path, unsigned int conf_work_size_mb, unsigned int conf_cache_size_mb)
{
    logprintfl (EUCAINFO, "initializing backing store...\n");

    if (conf_instances_path == NULL) {
        logprintfl (EUCAERROR, "error: INSTANCE_PATH not specified\n");
        return ERROR;
    }
    safe_strncpy (instances_path, conf_instances_path, sizeof (instances_path));
    if (check_directory (instances_path)) {
	    logprintfl (EUCAERROR, "error: INSTANCE_PATH (%s) does not exist!\n", instances_path);
        return ERROR;
    }
    char cache_path [MAX_PATH]; snprintf (cache_path, sizeof (cache_path), "%s/cache", instances_path);
    if (ensure_directories_exist (cache_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1) return ERROR;
    char work_path [MAX_PATH];  snprintf (work_path,  sizeof (work_path),  "%s/work", instances_path);
    if (ensure_directories_exist (work_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1) return ERROR;
    unsigned long long cache_limit_blocks = (unsigned long long) conf_cache_size_mb * 2048; // convert MB to 512-byte blocks
    unsigned long long work_limit_blocks  = (unsigned long long) conf_work_size_mb * 2048;
    if (work_limit_blocks==0) { // we take 0 as unlimited
        work_limit_blocks = ULLONG_MAX;
    }

    // by default we let blobstore pick the snapshot policy, which
    // will use device mapper if available, which is faster than copying
    blobstore_snapshot_t snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;
    if (nc_state.disable_snapshots) {
        logprintfl (EUCAINFO, "if allocating storage, will avoid using snapshots\n");
        snapshot_policy = BLOBSTORE_SNAPSHOT_NONE;
    }
    blobstore_set_error_function ( &bs_errors );
    if (cache_limit_blocks) {
        cache_bs = blobstore_open (cache_path, cache_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, snapshot_policy);
        if (cache_bs==NULL) {
            logprintfl (EUCAERROR, "ERROR: failed to open/create cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return ERROR;
        }
    }
    work_bs = blobstore_open (work_path, work_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, snapshot_policy);
    if (work_bs==NULL) {
        logprintfl (EUCAERROR, "ERROR: failed to open/create work blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        logprintfl (EUCAERROR, "ERROR: %s\n", blobstore_get_last_trace());
        blobstore_close (cache_bs);
        return ERROR;
    }

    // set the initial value of the semaphore to the number of 
    // disk-intensive operations that can run in parallel on this node
    if (nc_state.concurrent_disk_ops && (disk_sem = sem_alloc (nc_state.concurrent_disk_ops, "mutex")) == NULL) {
        logprintfl (EUCAERROR, "failed to create and initialize disk semaphore\n");
        return ERROR;
    }

    return OK;
}
Example #4
0
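/* Decide which SBCL binary version to install.  When no version was given,
 * download the platform table (trying a backup URI on failure) and pick the
 * version from it; otherwise just duplicate the version string supplied by
 * the user. */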
int sbcl_version_bin(struct install_options* param) {
  char* home=configdir();
  char* platforms_html=cat(home,"tmp",SLASH,"sbcl-bin.html",NULL);
  cond_printf(1,"sbcl_version_bin\n");
  ensure_directories_exist(platforms_html);
  param->version_not_specified=param->version?0:1;
  if(param->version_not_specified) {
    int ret;
    printf("No SBCL version specified. Downloading platform-table.html to see the available versions...\n");
    char* uri=get_opt("sbcl-bin-version-uri",0);
    ret=download_simple(uri?uri:PLATFORM_HTML_URI,platforms_html,0);
    if(ret!=0) {
      printf("Something wrong! Check the connection or sbcl.org is down. Download failed (Code=%d), tring the backup URL.\n",ret);
      ret=download_simple(PLATFORM_HTML_BACKUP_URI,platforms_html,0);
    }
    if(ret!=0) {
      printf("Download failed (Code=%d)\n",ret);
      return 0;
    }
    sbcl_bin_check_file(platforms_html);
    param->version=sbcl_bin(platforms_html,param->version_not_specified++);
  }else
    param->version=q(param->version);
  printf("Installing sbcl-bin/%s...\n",param->version);
  param->arch_in_archive_name=1;

  s(platforms_html),s(home);
  return 1;
}
Example #5
0
int create_instance_backing (ncInstance * instance)
{
    int ret = ERROR;
    virtualMachine * vm = &(instance->params);
    artifact * sentinel = NULL;

    // ensure instance directory exists
    set_path (instance->instancePath,    sizeof (instance->instancePath),    instance, NULL);
    if (ensure_directories_exist (instance->instancePath, 0, NULL, "root", BACKING_DIRECTORY_PERM) == -1)
        goto out;

    // set various instance-directory-relative paths in the instance struct
    set_path (instance->xmlFilePath,     sizeof (instance->xmlFilePath),     instance, "instance.xml");
    set_path (instance->libvirtFilePath, sizeof (instance->libvirtFilePath), instance, "libvirt.xml");
    set_path (instance->consoleFilePath, sizeof (instance->consoleFilePath), instance, "console.log");
    if (strstr (instance->platform, "windows")) {
        // generate the floppy file for windows instances
        if (makeWindowsFloppy (nc_state.home, instance->instancePath, instance->keyName, instance->instanceId)) {
            logprintfl (EUCAERROR, "[%s] error: could not create windows bootup script floppy\n", instance->instanceId);
            goto out;
        } else {
            set_path (instance->floppyFilePath, sizeof (instance->floppyFilePath), instance, "floppy");
        }
    }
    
    char work_prefix [1024]; // {userId}/{instanceId}
    set_id (instance, NULL, work_prefix, sizeof (work_prefix));
    
    // compute tree of dependencies
    sentinel = vbr_alloc_tree (vm, // the struct containing the VBR
                               FALSE, // for Xen and KVM we do not need to make disk bootable
                               TRUE, // make working copy of runtime-modifiable files
                               (instance->do_inject_key)?(instance->keyName):(NULL), // the SSH key
                               instance->instanceId); // ID is for logging
    if (sentinel == NULL) {
        logprintfl (EUCAERROR, "[%s] error: failed to prepare backing for instance\n", instance->instanceId);
        goto out;
    }

    sem_p (disk_sem);
    // download/create/combine the dependencies
    int rc = art_implement_tree (sentinel, work_bs, cache_bs, work_prefix, INSTANCE_PREP_TIMEOUT_USEC);
    sem_v (disk_sem);

    if (rc != OK) {
        logprintfl (EUCAERROR, "[%s] error: failed to implement backing for instance\n", instance->instanceId);
        goto out;
    }

    if (save_instance_struct (instance)) // update instance checkpoint now that the struct got updated
        goto out;

    ret = OK;
 out:
    if (sentinel)
        art_free (sentinel);
    return ret;
}
Example #6
0
int sbcl_bin_expand(struct install_options* param) {
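  /* Windows variant: extract the downloaded .msi archive with an
   * administrative install (msiexec /a) into src/<impl>-<version>-<arch>/
   * under configdir(), logging msiexec output to install.log. */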
  char* impl=param->impl;
  char* version=q(param->version);
  int ret;
  char* home=configdir();
  char* arch= arch_(param);
  char* archive=cat(impl,"-",version,"-",arch,".msi",NULL);
  char* log_path=cat(home,"impls",SLASH,"log",SLASH,impl,"-",version,"-",arch,SLASH,"install.log",NULL);
  char* dist_path;
  int pos=position_char("-",impl);
  if(pos!=-1) {
    impl=subseq(impl,0,pos);
  }else
    impl=q(impl);
  dist_path=cat(home,"src",SLASH,impl,"-",version,"-",arch,SLASH,NULL);
  printf("Extracting the msi archive. %s to %s\n",archive,dist_path);
  archive=s_cat(q(home),q("archives"),q(SLASH),archive,NULL);
  delete_directory(dist_path,1);
  ensure_directories_exist(dist_path);
  ensure_directories_exist(log_path);
  if(dist_path[strlen(dist_path)-1]=='\\')
    dist_path[strlen(dist_path)-1]='\0';

  char* cmd=cat("msiexec.exe /a \"",
                archive,
                "\" targetdir=\"",
                dist_path,
                "\" /qn /lv ",
                "\"",
                log_path,
                "\"",
                NULL);
  cmd=cat("cmd /c \"",cmd,"\"",NULL);
  cond_printf(1,"msiexeccmd:%s\n",cmd);
  ret=System(cmd);
  s(impl);
  s(dist_path);
  s(log_path);
  s(archive);
  s(cmd),s(home),s(version),s(arch);
  return !ret;
}
Example #7
0
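/* Build the command line used to unpack an archive with the bundled 7za.exe:
 * gzip/bzip2/xz tarballs are streamed from one 7za invocation into a second
 * one that untars them into outputpath, while plain 7z archives are handled
 * with a single 7za call.  do_extract selects extraction versus list/test
 * mode. */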
char* extract_command_str(int flags,const char *filename,int do_extract,const char* outputpath,char* type) {
  char* str=NULL; /* stays NULL if the archive type is not recognized */
  char* _uname_m=uname_m();
  char* _uname=uname();
  char* _homedir=configdir();
  char* exe=s_escape_string(cat(_homedir,"impls",SLASH,_uname_m,SLASH,_uname,SLASH,"7za",SLASH,"9.20",SLASH,"7za.exe",NULL));
  char *outputpath2=q(outputpath);
  char *filename2=q(filename);
  substitute_char('\\','/',outputpath2);
  outputpath2=s_escape_string(outputpath2);
  filename2=s_escape_string(filename2);
  ensure_directories_exist(outputpath2);
  if(strcmp(type,"gzip")==0 || strcmp(type,"bzip2")==0 || strcmp(type,"xz")==0) {
    str=cat("cmd /c \"",exe," ",do_extract?"x ":"l ",filename2," -so |",exe," x -ttar -si -y -o",outputpath2,"\"",NULL);
  }else if(strcmp(type,"7za")==0) {
    ensure_directories_exist(outputpath2);
    str=cat(exe," ",do_extract?"x":"t"," -y -o",outputpath2," ",filename2,NULL);
  }
  s(outputpath2),s(filename2),s(_homedir),s(_uname),s(_uname_m);
  return str;
}
Example #8
0
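/* Non-Windows variant: untar the downloaded sbcl-bin archive from archives/
 * into src/ under configdir() using the cmd_tar helper. */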
int sbcl_bin_expand(struct install_options* param) {
  cond_printf(1,"sbcl_bin_expand\n");
  char* argv[6]={"","-xf",NULL,"-C",NULL,NULL};
  char* archive=download_archive_name(param);
  char* dist_path=param->expand_path;
  char* home=configdir();
  printf("Extracting %s to %s\n",archive,dist_path);
  delete_directory(dist_path,1);
  ensure_directories_exist(dist_path);
  argv[2]=cat(home,"archives",SLASH,archive,NULL);
  argv[4]=cat(home,"src",SLASH,NULL);
  int ret=cmd_tar(array_stringlist(5,argv),NULL);
  s(archive),s(home);
  return !ret;
}
Example #9
0
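/* Download the implementation archive for the active installer into
 * archives/ under configdir(), unless it is already present there and
 * download.force is not set. */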
int download(struct install_options* param) {
  char* home=configdir();
  char* url=install_impl->uri;
  char* archive_name=download_archive_name(param);
  char* impl_archive=cat(home,"archives",SLASH,archive_name,NULL);
  if(!file_exist_p(impl_archive)
     || get_opt("download.force",1)) {
    printf("Downloading %s\n",url);
    /*TBD proxy support... etc*/
    if(url) {
      ensure_directories_exist(impl_archive);
      int status = download_simple(url,impl_archive,0);
      if(status) {
        printf("Download Failed with status %d. See download_simple in src/download.c\n", status);
        return 0;
        /* fail */
      }
      s(url);
    }
  } else printf("Skip downloading %s\n",url);
  s(impl_archive),s(home);
  return 1;
}
Example #10
0
//!
//! Main entry point of the application
//!
//! @param[in] argc the number of parameter passed on the command line
//! @param[in] argv the list of arguments
//!
//! @return EUCA_OK on success or EUCA_ERROR on failure.
//!
int main(int argc, char *argv[])
{
    int i = 0;
    int ret = EUCA_OK;
    int nparams = 0;
    int ncmds = 0;
    char *eq = NULL;
    char *key = NULL;
    char *val = NULL;
    char euca_root[] = "";
    char argv_str[4096] = "";
    char *cmd_name = NULL;
    char pid_file[EUCA_MAX_PATH] = "";
    FILE *fp = NULL;
    pid_t pid = 0;
    artifact *root = NULL;
    blobstore *work_bs = NULL;
    blobstore *cache_bs = NULL;
    imager_param *cmd_params = NULL;

    log_fp_set(stderr); // imager logs to stderr so image data can be piped to stdout
    set_debug(print_debug);

    // initialize globals
    artifacts_map = map_create(10);

    // use $EUCALYPTUS env var if available
    euca_home = getenv(EUCALYPTUS_ENV_VAR_NAME);
    if (!euca_home) {
        euca_home = euca_root;
    }
    // save the command line into a buffer so it's easier to rerun it by hand
    argv_str[0] = '\0';
    for (i = 0; i < argc; i++) {
        strncat(argv_str, "\"", sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, argv[i], sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, "\" ", sizeof(argv_str) - strlen(argv_str) - 1);
    }

    // initialize dependencies
    if (vmdk_init() == EUCA_OK) {
        vddk_available = TRUE;
    }

    // parse command-line parameters
    while (*(++argv)) {
        eq = strstr(*argv, "=");       // all params have '='s
        if (eq == NULL) {              // it's a command
            // process previous command, if any
            if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)
                ncmds++;               // increment only if there was a previous command

            if (ncmds + 1 > MAX_REQS)
                err("too many commands (max is %d)", MAX_REQS);

            cmd_name = *argv;
            cmd_params = NULL;
            nparams = 0;
        } else {                       // this is a parameter
            if (strlen(eq) == 1)
                usage("parameters must have non-empty values");
            *eq = '\0';                // split key from value
            if (strlen(*argv) == 1)
                usage("parameters must have non-empty names");

            key = *argv;
            val = eq + 1;
            if (key == NULL || val == NULL)
                usage("syntax error in parameters");

            if (key[0] == '-')
                key++;                 // skip '-' if any

            if (key[0] == '-')
                key++;                 // skip second '-' if any

            if (cmd_name == NULL) {    // without a preceding command => global parameter
                set_global_parameter(key, val);
                continue;
            }

            if (cmd_params == NULL) {
                cmd_params = calloc(MAX_PARAMS + 1, sizeof(imager_param));  // +1 for terminating NULL
                if (!cmd_params)
                    err("calloc failed");
            }

            if (nparams + 1 > MAX_PARAMS)
                err("too many parameters (max is %d)", MAX_PARAMS);
            cmd_params[nparams].key = key;
            cmd_params[nparams].val = val;
            nparams++;
        }
    }

    if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)   // validate last command
        ncmds++;

    LOGINFO("verified all parameters for %d command(s)\n", ncmds);
    if (print_argv) {
        LOGDEBUG("argv[]: %s\n", argv_str);
    }
    // record PID, which may be used by VB to kill the imager process (e.g., in cancelBundling)
    pid = getpid();
    sprintf(pid_file, "%s/imager.pid", get_work_dir());
    if ((fp = fopen(pid_file, "w")) == NULL) {
        err("could not create pid file");
    } else {
        fprintf(fp, "%d", pid);
        fclose(fp);
    }

    // invoke the requirements checkers in the same order as on command line,
    // constructing the artifact tree originating at 'root'
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->requirements != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            if ((root = reqs[i].cmd->requirements(&reqs[i], root)) == NULL) // pass results of earlier checkers to later checkers
                err("failed while verifying requirements");
        }
    }

    // it is OK for root to be NULL at this point

    // see if work blobstore will be needed at any stage
    // and open or create the work blobstore
    if (root && tree_uses_blobstore(root)) {
        // set the function that will catch blobstore errors
        blobstore_set_error_function(&bs_errors);

        if (ensure_directories_exist(get_work_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create work directory %s", get_work_dir());

        work_bs = blobstore_open(get_work_dir(), get_work_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, BLOBSTORE_SNAPSHOT_ANY);
        if (work_bs == NULL) {
            err("failed to open work blobstore: %s", blobstore_get_error_str(blobstore_get_error()));
        }
        // no point in fscking the work blobstore as it was just created
    }
    // see if cache blobstore will be needed at any stage
    if (root && tree_uses_cache(root)) {
        if (ensure_directories_exist(get_cache_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create cache directory %s", get_cache_dir());
        cache_bs = blobstore_open(get_cache_dir(), get_cache_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, BLOBSTORE_SNAPSHOT_ANY);
        if (cache_bs == NULL) {
            blobstore_close(work_bs);
            err("failed to open cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        }

        if (blobstore_fsck(cache_bs, NULL)) //! @TODO: verify checksums?
            err("cache blobstore failed integrity check: %s", blobstore_get_error_str(blobstore_get_error()));

        if (stat_blobstore(get_cache_dir(), cache_bs))
            err("blobstore is unreadable");
    }
    // implement the artifact tree
    ret = EUCA_OK;
    if (root) {
        art_set_instanceId("imager");  // for logging
        ret = art_implement_tree(root, work_bs, cache_bs, NULL, INSTANCE_PREP_TIMEOUT_USEC);    // do all the work!
    }
    // invoke the cleaners for each command to tidy up disk space and memory allocations
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->cleanup != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            reqs[i].cmd->cleanup(&reqs[i], (i == (ncmds - 1)) ? (TRUE) : (FALSE));
        }
    }

    // free the artifact tree
    if (root) {
        if (tree_uses_blobstore(root)) {
            if (blobstore_fsck(work_bs, stale_blob_examiner)) { // will remove all blobs
                LOGWARN("failed to clean up work space: %s\n", blobstore_get_error_str(blobstore_get_error()));
            }
        }
        art_free(root);
    }
    clean_work_dir(work_bs);

    // indicate completion
    LOGINFO("imager done (exit code=%d)\n", ret);

    exit(ret);
}
Example #11
0
//!
//! Implement the backing store for a given instance
//!
//! @param[in] instance pointer to the instance
//! @param[in] is_migration_dest set to TRUE when the backing store is being prepared on the migration destination
//!
//! @return EUCA_OK on success or EUCA_ERROR on failure
//!
//! @pre The instance parameter must not be NULL.
//!
//! @post
//!
int create_instance_backing(ncInstance * instance, boolean is_migration_dest)
{
    int rc = 0;
    int ret = EUCA_ERROR;
    virtualMachine *vm = &(instance->params);
    artifact *sentinel = NULL;
    char work_prefix[1024] = { 0 };    // {userId}/{instanceId}

    // set various instance-directory-relative paths in the instance struct
    set_instance_paths(instance);

    // ensure instance directory exists
    if (ensure_directories_exist(instance->instancePath, 0, NULL, "root", BACKING_DIRECTORY_PERM) == -1)
        goto out;

    if (strstr(instance->platform, "windows")) {
        // generate the floppy file for windows instances
        if (makeWindowsFloppy(nc_state.home, instance->instancePath, instance->keyName, instance->instanceId)) {
            LOGERROR("[%s] could not create windows bootup script floppy\n", instance->instanceId);
            goto out;
        } else {
            set_path(instance->floppyFilePath, sizeof(instance->floppyFilePath), instance, "floppy");
        }
    } else if (strlen(instance->instancePk) > 0) {  // TODO: credential floppy is limited to Linux instances ATM
        LOGDEBUG("[%s] creating floppy for instance credential\n", instance->instanceId);
        if (make_credential_floppy(nc_state.home, instance)) {
            LOGERROR("[%s] could not create credential floppy\n", instance->instanceId);
            goto out;
        } else {
            set_path(instance->floppyFilePath, sizeof(instance->floppyFilePath), instance, "floppy");
        }
    }

    set_id(instance, NULL, work_prefix, sizeof(work_prefix));

    // if this looks like a partition m1.small image, make it a bootable disk
    virtualMachine *vm2 = NULL;
    LOGDEBUG("vm->virtualBootRecordLen=%d\n", vm->virtualBootRecordLen);
    if (vm->virtualBootRecordLen == 5) {    // TODO: make this check more robust

        // as an experiment, construct a new VBR, without swap and ephemeral
        virtualMachine vm_copy;
        vm2 = &vm_copy;
        memcpy(vm2, vm, sizeof(virtualMachine));
        bzero(vm2->virtualBootRecord, EUCA_MAX_VBRS * sizeof(virtualBootRecord));
        vm2->virtualBootRecordLen = 0;

        virtualBootRecord *emi_vbr = NULL;
        for (int i = 0; i < EUCA_MAX_VBRS && i < vm->virtualBootRecordLen; i++) {
            virtualBootRecord *vbr = &(vm->virtualBootRecord[i]);
            if (vbr->type != NC_RESOURCE_KERNEL && vbr->type != NC_RESOURCE_RAMDISK && vbr->type != NC_RESOURCE_IMAGE)
                continue;
            if (vbr->type == NC_RESOURCE_IMAGE)
                emi_vbr = vbr;
            memcpy(vm2->virtualBootRecord + (vm2->virtualBootRecordLen++), vbr, sizeof(virtualBootRecord));
        }

        if (emi_vbr == NULL) {
            LOGERROR("[%s] failed to find EMI among VBR entries\n", instance->instanceId);
            goto out;
        }

        if (vbr_add_ascii("boot:none:104857600:ext3:sda2:none", vm2) != EUCA_OK) {
            LOGERROR("[%s] could not add a boot partition VBR entry\n", instance->instanceId);
            goto out;
        }
        if (vbr_parse(vm2, NULL) != EUCA_OK) {
            LOGERROR("[%s] could not parse the boot partition VBR entry\n", instance->instanceId);
            goto out;
        }
        // compute tree of dependencies
        sentinel = vbr_alloc_tree(vm2, // the struct containing the VBR
                                  TRUE, // we always make the disk bootable, for consistency
                                  TRUE, // make working copy of runtime-modifiable files
                                  is_migration_dest,    // tree of an instance on the migration destination
                                  (instance->do_inject_key) ? (instance->keyName) : (NULL), // the SSH key
                                  instance->instanceId);    // ID is for logging
        if (sentinel == NULL) {
            LOGERROR("[%s] failed to prepare backing for instance\n", instance->instanceId);
            goto out;
        }

        LOGDEBUG("disk size prior to tree implementation is = %lld\n", sentinel->deps[0]->size_bytes);
        long long right_disk_size = sentinel->deps[0]->size_bytes;

        sem_p(disk_sem);
        {
            // download/create/combine the dependencies
            rc = art_implement_tree(sentinel, work_bs, cache_bs, work_prefix, INSTANCE_PREP_TIMEOUT_USEC);
        }
        sem_v(disk_sem);

        if (rc != EUCA_OK) {
            LOGERROR("[%s] failed to implement backing for instance\n", instance->instanceId);
            goto out;
        }

        LOGDEBUG("[%s] created the initial bootable disk\n", instance->instanceId);

        /* option A starts */
        assert(emi_vbr);
        assert(sentinel->deps[0]);
        strcpy(emi_vbr->guestDeviceName, "sda");    // switch 'sda1' to 'sda' now that we've built the disk
        //emi_vbr->sizeBytes = sentinel->deps[0]->size_bytes; // update the size to match the disk
        emi_vbr->sizeBytes = right_disk_size;   // this is bad...
        LOGDEBUG("at boot disk creation time emi_vbr->sizeBytes = %lld\n", emi_vbr->sizeBytes);
        euca_strncpy(emi_vbr->id, sentinel->deps[0]->id, SMALL_CHAR_BUFFER_SIZE);   // change to the ID of the disk
        if (vbr_parse(vm, NULL) != EUCA_OK) {
            LOGERROR("[%s] could not parse the boot partition VBR entry\n", instance->instanceId);
            goto out;
        }
        emi_vbr->locationType = NC_LOCATION_NONE;   // i.e., it should already exist

        art_free(sentinel);
        /* option A end */

        /* option B starts *
           memcpy(vm, vm2, sizeof(virtualMachine));
           if (save_instance_struct(instance)) // update instance checkpoint now that the struct got updated
           goto out;
           ret = EUCA_OK;
           goto out;
           * option B ends */
    }
    // compute tree of dependencies
    sentinel = vbr_alloc_tree(vm,      // the struct containing the VBR
                              FALSE,   // if image had to be made bootable, that was done above
                              TRUE,    // make working copy of runtime-modifiable files
                              is_migration_dest,    // tree of an instance on the migration destination
                              (instance->do_inject_key) ? (instance->keyName) : (NULL), // the SSH key
                              instance->instanceId);    // ID is for logging
    if (sentinel == NULL) {
        LOGERROR("[%s] failed to prepare extended backing for instance\n", instance->instanceId);
        goto out;
    }

    sem_p(disk_sem);
    {
        // download/create/combine the dependencies
        rc = art_implement_tree(sentinel, work_bs, cache_bs, work_prefix, INSTANCE_PREP_TIMEOUT_USEC);
    }
    sem_v(disk_sem);

    if (rc != EUCA_OK) {
        LOGERROR("[%s] failed to implement backing for instance\n", instance->instanceId);
        goto out;
    }

    if (save_instance_struct(instance)) // update instance checkpoint now that the struct got updated
        goto out;

    ret = EUCA_OK;

out:
    if (sentinel)
        art_free(sentinel);
    return (ret);
}
Example #12
0
//!
//! Initialize the backing store. Called during initialization of node controller.
//!
//! @param[in] conf_instances_path path to where the instances information are stored
//! @param[in] conf_work_size_mb the work blobstore size limit in MB (if 0 then unlimited)
//! @param[in] conf_cache_size_mb the cache blobstore size limit in MB (if 0 then cache isn't used)
//!
//! @return EUCA_OK on success or the following error codes:
//!         \li EUCA_INVALID_ERROR: if any parameter does not meet the preconditions
//!         \li EUCA_ACCESS_ERROR: if we fail to access our cache and work directories
//!         \li EUCA_PERMISSION_ERROR: if we fail to create the cache or work stores.
//!
//! @pre The conf_instances_path field must not be NULL
//!
//! @post On success, the backing store module is initialized and the following happened:
//!       \li our global instance_path variable is set with the given conf_instance_path
//!       \li the work blobstore is created and our global work_bs variable is set
//!       \li the cache blobstore is created if necessary and the cache_bs variable is set
//!       \li the disk semaphore is created if necessary
//!
int init_backing_store(const char *conf_instances_path, unsigned int conf_work_size_mb, unsigned int conf_cache_size_mb)
{
    char cache_path[EUCA_MAX_PATH] = "";
    char work_path[EUCA_MAX_PATH] = "";
    unsigned long long cache_limit_blocks = 0;
    unsigned long long work_limit_blocks = 0;
    blobstore_snapshot_t snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;

    LOGINFO("initializing backing store...\n");

    // Make sure we have a valid instance path passed to us
    if (conf_instances_path == NULL) {
        LOGERROR("INSTANCE_PATH not specified\n");
        return (EUCA_INVALID_ERROR);
    }
    // Set our global instance_path variable with the content of conf_instance_path
    euca_strncpy(instances_path, conf_instances_path, sizeof(instances_path));
    if (check_directory(instances_path)) {
        LOGERROR("INSTANCE_PATH (%s) does not exist!\n", instances_path);
        return (EUCA_ACCESS_ERROR);
    }
    // Check if our cache path exists. If not, it should get created
    snprintf(cache_path, sizeof(cache_path), "%s/cache", instances_path);
    if (ensure_directories_exist(cache_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return (EUCA_ACCESS_ERROR);

    // Check if our work path exists. If not, it should get created
    snprintf(work_path, sizeof(work_path), "%s/work", instances_path);
    if (ensure_directories_exist(work_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return (EUCA_ACCESS_ERROR);

    // convert MB to blocks
    cache_limit_blocks = (unsigned long long)conf_cache_size_mb *2048;
    work_limit_blocks = (unsigned long long)conf_work_size_mb *2048;

    // we take 0 as unlimited
    if (work_limit_blocks == 0) {
        work_limit_blocks = ULLONG_MAX;
    }
    // by default we let blobstore pick the snapshot policy, which
    // will use device mapper if available, which is faster than copying
    snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;
    if (nc_state.disable_snapshots) {
        LOGINFO("if allocating storage, will avoid using snapshots\n");
        snapshot_policy = BLOBSTORE_SNAPSHOT_NONE;
    }
    // Set the backing store error callback function
    blobstore_set_error_function(&bs_errors);

    // Do we need to create a cache blobstore
    if (cache_limit_blocks) {
        cache_bs = blobstore_open(cache_path, cache_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, snapshot_policy);
        if (cache_bs == NULL) {
            LOGERROR("failed to open/create cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return (EUCA_PERMISSION_ERROR);
        }
    }
    // Let's open the work blobstore
    work_bs = blobstore_open(work_path, work_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, snapshot_policy);
    if (work_bs == NULL) {
        LOGERROR("failed to open/create work blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        LOGERROR("%s\n", blobstore_get_last_trace());
        BLOBSTORE_CLOSE(cache_bs);
        return (EUCA_PERMISSION_ERROR);
    }
    // set the initial value of the semaphore to the number of
    // disk-intensive operations that can run in parallel on this node
    if (nc_state.concurrent_disk_ops && ((disk_sem = sem_alloc(nc_state.concurrent_disk_ops, IPC_MUTEX_SEMAPHORE)) == NULL)) {
        LOGERROR("failed to create and initialize disk semaphore\n");
        return (EUCA_PERMISSION_ERROR);
    }

    return (EUCA_OK);
}
Example #13
0
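/*
 * Open the given file read/write, creating it (and its parent directories)
 * if necessary.  Falls back to read-only access on a read-only filesystem,
 * takes a lock while the file is writable, and writes the supplied header
 * into a freshly created empty file.  Returns the open fd, or -1 on failure.
 */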
int
open_file(const char *file, int *can_write, const char *header)
{
	int fd;
	struct stat s;

	if (ensure_directories_exist(file, 0700))
		return -1;
	*can_write = 1;
	fd = open(file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		if (errno == EROFS) {
			*can_write = 0;
			condlog(3, "Cannot open file [%s] read/write. "
				" trying readonly", file);
			fd = open(file, O_RDONLY);
			if (fd < 0) {
				condlog(0, "Cannot open file [%s] "
					"readonly : %s", file, strerror(errno));
				return -1;
			}
		}
		else {
			condlog(0, "Cannot open file [%s] : %s", file,
				strerror(errno));
			return -1;
		}
	}
	if (*can_write && lock_file(fd, file) < 0)
		goto fail;

	memset(&s, 0, sizeof(s));
	if (fstat(fd, &s) < 0){
		condlog(0, "Cannot stat file %s : %s", file, strerror(errno));
		goto fail;
	}
	if (s.st_size == 0) {
		if (*can_write == 0)
			goto fail;
		/* If file is empty, write the header */
		size_t len = strlen(header);
		if (write(fd, header, len) != len) {
			condlog(0,
				"Cannot write header to file %s : %s", file,
				strerror(errno));
			/* cleanup partially written header */
			if (ftruncate(fd, 0))
				condlog(0, "Cannot truncate header : %s",
					strerror(errno));
			goto fail;
		}
		fsync(fd);
		condlog(3, "Initialized new file [%s]", file);
	}

	return fd;

fail:
	close(fd);
	return -1;
}
Example #14
0
//!
//! Implement the backing store for a given instance
//!
//! @param[in] instance pointer to the instance
//! @param[in] is_migration_dest set to TRUE when the backing store is being prepared on the migration destination
//!
//! @return EUCA_OK on success or EUCA_ERROR on failure
//!
//! @pre The instance parameter must not be NULL.
//!
//! @post
//!
int create_instance_backing(ncInstance * instance, boolean is_migration_dest)
{
    int rc = 0;
    int ret = EUCA_ERROR;
    virtualMachine *vm = &(instance->params);
    artifact *sentinel = NULL;
    char work_prefix[1024] = { 0 };    // {userId}/{instanceId}

    // set various instance-directory-relative paths in the instance struct
    set_instance_paths(instance);

    // ensure instance directory exists
    if (ensure_directories_exist(instance->instancePath, 0, NULL, "root", BACKING_DIRECTORY_PERM) == -1)
        goto out;

    if (strstr(instance->platform, "windows")) {
        // generate the floppy file for windows instances
        if (makeWindowsFloppy(nc_state.home, instance->instancePath, instance->keyName, instance->instanceId)) {
            LOGERROR("[%s] could not create windows bootup script floppy\n", instance->instanceId);
            goto out;
        } else {
            set_path(instance->floppyFilePath, sizeof(instance->floppyFilePath), instance, "floppy");
        }
    } else if (instance->instancePk != NULL && strlen(instance->instancePk) > 0) {  // TODO: credential floppy is limited to Linux instances ATM
        LOGDEBUG("[%s] creating floppy for instance credential\n", instance->instanceId);
        if (make_credential_floppy(nc_state.home, instance)) {
            LOGERROR("[%s] could not create credential floppy\n", instance->instanceId);
            goto out;
        } else {
            set_path(instance->floppyFilePath, sizeof(instance->floppyFilePath), instance, "floppy");
        }
    }

    set_id(instance, NULL, work_prefix, sizeof(work_prefix));

    // compute tree of dependencies
    sentinel = vbr_alloc_tree(vm,      // the struct containing the VBR
                              FALSE,   // for Xen and KVM we do not need to make disk bootable
                              TRUE,    // make working copy of runtime-modifiable files
                              is_migration_dest,    // tree of an instance on the migration destination
                              (instance->do_inject_key) ? (instance->keyName) : (NULL), // the SSH key
                              instance->instanceId);    // ID is for logging
    if (sentinel == NULL) {
        LOGERROR("[%s] failed to prepare backing for instance\n", instance->instanceId);
        goto out;
    }

    sem_p(disk_sem);
    {
        // download/create/combine the dependencies
        rc = art_implement_tree(sentinel, work_bs, cache_bs, work_prefix, INSTANCE_PREP_TIMEOUT_USEC);
    }
    sem_v(disk_sem);

    if (rc != EUCA_OK) {
        LOGERROR("[%s] failed to implement backing for instance\n", instance->instanceId);
        goto out;
    }

    if (save_instance_struct(instance)) // update instance checkpoint now that the struct got updated
        goto out;

    ret = EUCA_OK;

out:
    if (sentinel)
        art_free(sentinel);
    return (ret);
}