Example #1
//!
//! Sets our global instances list and checks the integrity of our work and
//! cache blobstores.
//!
//! @param[in] global_instances a pointer to our list of instances.
//!
//! @return EUCA_OK on success or EUCA_ERROR if any error occurred.
//!
//! @see blobstore_fsck()
//!
//! @pre  The global_instances parameter should not be NULL.
//!
//! @post The global instances variable will be set to global_instances regardless of the
//!       outcome of this function. If any error occurred while checking the integrity of
//!       the work blobstore, the cache blobstore will be closed.
//!
int check_backing_store(bunchOfInstances ** global_instances)
{
    instances = global_instances;

    if (work_bs) {
        if (blobstore_fsck(work_bs, stale_blob_examiner)) {
            LOGERROR("work directory failed integrity check: %s\n", blobstore_get_error_str(blobstore_get_error()));
            //! @todo CHUCK -> Ok to close cache_bs and not set to NULL???
            BLOBSTORE_CLOSE(cache_bs);
            return (EUCA_ERROR);
        }
    }

    if (cache_bs) {
        if (blobstore_fsck(cache_bs, NULL)) {
            //! @TODO verify checksums?
            LOGERROR("cache failed integrity check: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return (EUCA_ERROR);
        }
    }
    return (EUCA_OK);
}
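
A minimal usage sketch, assuming the blobstores were already created by init_backing_store() (shown in Example #3 below); the wrapper function and the instance-list variable here are hypothetical and only illustrate the expected call order during node controller startup.

static bunchOfInstances *nc_instances = NULL;   // hypothetical global instance list

static int storage_startup(const char *instances_path)
{
    // create/open the work and cache blobstores (0 = unlimited work space, 50000 MB cache)
    if (init_backing_store(instances_path, 0, 50000) != EUCA_OK)
        return (EUCA_ERROR);

    // run the integrity checks shown above on both blobstores
    if (check_backing_store(&nc_instances) != EUCA_OK)
        return (EUCA_ERROR);

    return (EUCA_OK);
}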
Example #2
//!
//! Main entry point of the application
//!
//! @param[in] argc the number of parameters passed on the command line
//! @param[in] argv the list of arguments
//!
//! @return EUCA_OK on success or EUCA_ERROR on failure.
//!
int main(int argc, char *argv[])
{
    int i = 0;
    int ret = EUCA_OK;
    int nparams = 0;
    int ncmds = 0;
    char *eq = NULL;
    char *key = NULL;
    char *val = NULL;
    char euca_root[] = "";
    char argv_str[4096] = "";
    char *cmd_name = NULL;
    char pid_file[EUCA_MAX_PATH] = "";
    FILE *fp = NULL;
    pid_t pid = 0;
    artifact *root = NULL;
    blobstore *work_bs = NULL;
    blobstore *cache_bs = NULL;
    imager_param *cmd_params = NULL;

    log_fp_set(stderr); // imager logs to stderr so image data can be piped to stdout
    set_debug(print_debug);

    // initialize globals
    artifacts_map = map_create(10);

    // use $EUCALYPTUS env var if available
    euca_home = getenv(EUCALYPTUS_ENV_VAR_NAME);
    if (!euca_home) {
        euca_home = euca_root;
    }
    // save the command line into a buffer so it's easier to rerun it by hand
    argv_str[0] = '\0';
    for (i = 0; i < argc; i++) {
        strncat(argv_str, "\"", sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, argv[i], sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, "\" ", sizeof(argv_str) - strlen(argv_str) - 1);
    }

    // initialize dependencies
    if (vmdk_init() == EUCA_OK) {
        vddk_available = TRUE;
    }

    // parse command-line parameters
    while (*(++argv)) {
        eq = strstr(*argv, "=");       // all params have '='s
        if (eq == NULL) {              // it's a command
            // process previous command, if any
            if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)
                ncmds++;               // increment only if there was a previous command

            if (ncmds + 1 > MAX_REQS)
                err("too many commands (max is %d)", MAX_REQS);

            cmd_name = *argv;
            cmd_params = NULL;
            nparams = 0;
        } else {                       // this is a parameter
            if (strlen(eq) == 1)
                usage("parameters must have non-empty values");
            *eq = '\0';                // split key from value
            if (strlen(*argv) == 1)
                usage("parameters must have non-empty names");

            key = *argv;
            val = eq + 1;
            if (key == NULL || val == NULL)
                usage("syntax error in parameters");

            if (key[0] == '-')
                key++;                 // skip '-' if any

            if (key[0] == '-')
                key++;                 // skip second '-' if any

            if (cmd_name == NULL) {    // without a preceding command => global parameter
                set_global_parameter(key, val);
                continue;
            }

            if (cmd_params == NULL) {
                cmd_params = calloc(MAX_PARAMS + 1, sizeof(imager_param));  // +1 for terminating NULL
                if (!cmd_params)
                    err("calloc failed");
            }

            if (nparams + 1 > MAX_PARAMS)
                err("too many parameters (max is %d)", MAX_PARAMS);
            cmd_params[nparams].key = key;
            cmd_params[nparams].val = val;
            nparams++;
        }
    }

    if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)   // validate last command
        ncmds++;

    LOGINFO("verified all parameters for %d command(s)\n", ncmds);
    if (print_argv) {
        LOGDEBUG("argv[]: %s\n", argv_str);
    }
    // record PID, which may be used by VB to kill the imager process (e.g., in cancelBundling)
    pid = getpid();
    sprintf(pid_file, "%s/imager.pid", get_work_dir());
    if ((fp = fopen(pid_file, "w")) == NULL) {
        err("could not create pid file");
    } else {
        fprintf(fp, "%d", pid);
        fclose(fp);
    }

    // invoke the requirements checkers in the same order as on command line,
    // constructing the artifact tree originating at 'root'
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->requirements != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            if ((root = reqs[i].cmd->requirements(&reqs[i], root)) == NULL) // pass results of earlier checkers to later checkers
                err("failed while verifying requirements");
        }
    }

    // it is OK for root to be NULL at this point

    // see if work blobstore will be needed at any stage
    // and open or create the work blobstore
    if (root && tree_uses_blobstore(root)) {
        // set the function that will catch blobstore errors
        blobstore_set_error_function(&bs_errors);

        if (ensure_directories_exist(get_work_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create work directory %s", get_work_dir());

        work_bs = blobstore_open(get_work_dir(), get_work_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, BLOBSTORE_SNAPSHOT_ANY);
        if (work_bs == NULL) {
            err("failed to open work blobstore: %s", blobstore_get_error_str(blobstore_get_error()));
        }
        // no point in fscking the work blobstore as it was just created
    }
    // see if cache blobstore will be needed at any stage
    if (root && tree_uses_cache(root)) {
        if (ensure_directories_exist(get_cache_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create cache directory %s", get_cache_dir());
        cache_bs = blobstore_open(get_cache_dir(), get_cache_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, BLOBSTORE_SNAPSHOT_ANY);
        if (cache_bs == NULL) {
            blobstore_close(work_bs);
            err("failed to open cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        }

        if (blobstore_fsck(cache_bs, NULL)) //! @TODO: verify checksums?
            err("cache blobstore failed integrity check: %s", blobstore_get_error_str(blobstore_get_error()));

        if (stat_blobstore(get_cache_dir(), cache_bs))
            err("blobstore is unreadable");
    }
    // implement the artifact tree
    ret = EUCA_OK;
    if (root) {
        art_set_instanceId("imager");  // for logging
        ret = art_implement_tree(root, work_bs, cache_bs, NULL, INSTANCE_PREP_TIMEOUT_USEC);    // do all the work!
    }
    // invoke the cleaners for each command to tidy up disk space and memory allocations
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->cleanup != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            reqs[i].cmd->cleanup(&reqs[i], (i == (ncmds - 1)) ? (TRUE) : (FALSE));
        }
    }

    // free the artifact tree
    if (root) {
        if (tree_uses_blobstore(root)) {
            if (blobstore_fsck(work_bs, stale_blob_examiner)) { // will remove all blobs
                LOGWARN("failed to clean up work space: %s\n", blobstore_get_error_str(blobstore_get_error()));
            }
        }
        art_free(root);
    }
    clean_work_dir(work_bs);

    // indicate completion
    LOGINFO("imager done (exit code=%d)\n", ret);

    exit(ret);
}
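
The command-line grammar that main() implements is worth spelling out: tokens containing '=' are key=value parameters, tokens without '=' are commands, and parameters that appear before the first command are treated as global. A hypothetical argv[] illustrating this convention (the specific command and parameter names below are assumptions, not necessarily the imager's actual vocabulary):

/* Hypothetical invocation: one global parameter, then two commands with their parameters. */
static const char *example_argv[] = {
    "euca_imager",
    "debug=yes",        // global parameter: precedes any command
    "prepare",          // first command
    "boot=true",        //   parameter attached to 'prepare'
    "convert",          // second command
    "in=disk.raw",      //   parameters attached to 'convert'
    "out=disk.vmdk",
    NULL
};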
Example #3
//!
//! Initialize the backing store. Called during initialization of node controller.
//!
//! @param[in] conf_instances_path path to where the instance information is stored
//! @param[in] conf_work_size_mb the work blobstore size limit in MB (if 0 then unlimited)
//! @param[in] conf_cache_size_mb the cache blobstore size limit in MB (if 0 then cache isn't used)
//!
//! @return EUCA_OK on success or the following error codes:
//!         \li EUCA_INVALID_ERROR: if any parameter does not meet the preconditions
//!         \li EUCA_ACCESS_ERROR: if we fail to access our cache and work directories
//!         \li EUCA_PERMISSION_ERROR: if we fail to create the cache or work stores.
//!
//! @pre The conf_instances_path field must not be NULL
//!
//! @post On success, the backing store module is initialized and the following happened:
//!       \li our global instances_path variable is set to the given conf_instances_path
//!       \li the work blobstore is created and our global work_bs variable is set
//!       \li the cache blobstore is created if necessary and the cache_bs variable is set
//!       \li the disk semaphore is created if necessary
//!
int init_backing_store(const char *conf_instances_path, unsigned int conf_work_size_mb, unsigned int conf_cache_size_mb)
{
    char cache_path[EUCA_MAX_PATH] = "";
    char work_path[EUCA_MAX_PATH] = "";
    unsigned long long cache_limit_blocks = 0;
    unsigned long long work_limit_blocks = 0;
    blobstore_snapshot_t snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;

    LOGINFO("initializing backing store...\n");

    // Make sure we have a valid instance path passed to us
    if (conf_instances_path == NULL) {
        LOGERROR("INSTANCE_PATH not specified\n");
        return (EUCA_INVALID_ERROR);
    }
    // Set our global instances_path variable to the content of conf_instances_path
    euca_strncpy(instances_path, conf_instances_path, sizeof(instances_path));
    if (check_directory(instances_path)) {
        LOGERROR("INSTANCE_PATH (%s) does not exist!\n", instances_path);
        return (EUCA_ACCESS_ERROR);
    }
    // Check if our cache path exists; if not, it will get created
    snprintf(cache_path, sizeof(cache_path), "%s/cache", instances_path);
    if (ensure_directories_exist(cache_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return (EUCA_ACCESS_ERROR);

    // Check if our work path exists; if not, it will get created
    snprintf(work_path, sizeof(work_path), "%s/work", instances_path);
    if (ensure_directories_exist(work_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return (EUCA_ACCESS_ERROR);

    // convert MB to blocks
    cache_limit_blocks = (unsigned long long)conf_cache_size_mb * 2048;
    work_limit_blocks = (unsigned long long)conf_work_size_mb * 2048;

    // we take 0 as unlimited
    if (work_limit_blocks == 0) {
        work_limit_blocks = ULLONG_MAX;
    }
    // by default we let blobstore pick the snapshot policy: it will use the
    // device mapper if available, which is faster than copying
    snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;
    if (nc_state.disable_snapshots) {
        LOGINFO("if allocating storage, will avoid using snapshots\n");
        snapshot_policy = BLOBSTORE_SNAPSHOT_NONE;
    }
    // Set the backing store error callback function
    blobstore_set_error_function(&bs_errors);

    // Do we need to create a cache blobstore
    if (cache_limit_blocks) {
        cache_bs = blobstore_open(cache_path, cache_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, snapshot_policy);
        if (cache_bs == NULL) {
            LOGERROR("failed to open/create cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return (EUCA_PERMISSION_ERROR);
        }
    }
    // Lets open the work blobstore
    work_bs = blobstore_open(work_path, work_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, snapshot_policy);
    if (work_bs == NULL) {
        LOGERROR("failed to open/create work blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        LOGERROR("%s\n", blobstore_get_last_trace());
        BLOBSTORE_CLOSE(cache_bs);
        return (EUCA_PERMISSION_ERROR);
    }
    // set the initial value of the semaphore to the number of
    // disk-intensive operations that can run in parallel on this node
    if (nc_state.concurrent_disk_ops && ((disk_sem = sem_alloc(nc_state.concurrent_disk_ops, IPC_MUTEX_SEMAPHORE)) == NULL)) {
        LOGERROR("failed to create and initialize disk semaphore\n");
        return (EUCA_PERMISSION_ERROR);
    }

    return (EUCA_OK);
}
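
Both revisions of init_backing_store() convert the megabyte limits to 512-byte blocks by multiplying by 2048, which is consistent with Example #2 dividing its byte limits by 512 before calling blobstore_open(). A small illustrative helper (the function name is an assumption, not part of the original source) makes the conversion and the overflow-avoiding cast explicit:

/* 1 MB = 1048576 bytes / 512 bytes per block = 2048 blocks; widening the
 * operand before multiplying avoids 32-bit overflow for very large limits. */
static unsigned long long mb_to_blocks(unsigned int size_mb)
{
    return (unsigned long long)size_mb * 2048ULL;
}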
Example #4
int init_backing_store (const char * conf_instances_path, unsigned int conf_work_size_mb, unsigned int conf_cache_size_mb)
{
    logprintfl (EUCAINFO, "initializing backing store...\n");

    if (conf_instances_path == NULL) {
        logprintfl (EUCAERROR, "error: INSTANCE_PATH not specified\n");
        return ERROR;
    }
    safe_strncpy (instances_path, conf_instances_path, sizeof (instances_path));
    if (check_directory (instances_path)) {
	    logprintfl (EUCAERROR, "error: INSTANCE_PATH (%s) does not exist!\n", instances_path);
        return ERROR;
    }
    char cache_path [MAX_PATH];
    snprintf (cache_path, sizeof (cache_path), "%s/cache", instances_path);
    if (ensure_directories_exist (cache_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1) return ERROR;
    char work_path [MAX_PATH];
    snprintf (work_path, sizeof (work_path), "%s/work", instances_path);
    if (ensure_directories_exist (work_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1) return ERROR;
    unsigned long long cache_limit_blocks = (unsigned long long)conf_cache_size_mb * 2048; // convert MB to 512-byte blocks
    unsigned long long work_limit_blocks  = (unsigned long long)conf_work_size_mb * 2048;  // widen before multiplying to avoid overflow
    if (work_limit_blocks==0) { // we take 0 as unlimited
        work_limit_blocks = ULLONG_MAX;
    }

    blobstore_set_error_function ( &bs_errors );
    if (cache_limit_blocks) {
        cache_bs = blobstore_open (cache_path, cache_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, BLOBSTORE_SNAPSHOT_ANY);
        if (cache_bs==NULL) {
            logprintfl (EUCAERROR, "ERROR: failed to open/create cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return ERROR;
        }
    }
    work_bs = blobstore_open (work_path, work_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, BLOBSTORE_SNAPSHOT_ANY);
    if (work_bs==NULL) {
        logprintfl (EUCAERROR, "ERROR: failed to open/create work blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        logprintfl (EUCAERROR, "ERROR: %s\n", blobstore_get_last_trace());
        blobstore_close (cache_bs);
        return ERROR;
    }

    // set the initial value of the semaphore to the number of 
    // disk-intensive operations that can run in parallel on this node
    if (nc_state.concurrent_disk_ops && (disk_sem = sem_alloc (nc_state.concurrent_disk_ops, "mutex")) == NULL) {
        logprintfl (EUCAERROR, "failed to create and initialize disk semaphore\n");
        return ERROR;
    }

    return OK;
}
Example #5
int check_backing_store (bunchOfInstances ** global_instances)
{
    instances = global_instances;

    if (work_bs) {
        if (blobstore_fsck (work_bs, stale_blob_examiner)) {
            logprintfl (EUCAERROR, "ERROR: work directory failed integrity check: %s\n", blobstore_get_error_str(blobstore_get_error()));
            blobstore_close (cache_bs);
            return ERROR;
        }
    }
    if (cache_bs) {
        if (blobstore_fsck (cache_bs, NULL)) { // TODO: verify checksums?
            logprintfl (EUCAERROR, "ERROR: cache failed integrity check: %s\n", blobstore_get_error_str(blobstore_get_error()));
            return ERROR;
        }
    }
    return OK;
}