static int stat_blobstore (const char * conf_instances_path, const char * name, blobstore_meta * meta)
{
    bzero (meta, sizeof (blobstore_meta));
    char path [MAX_PATH];
    snprintf (path, sizeof (path), "%s/%s", conf_instances_path, name);

    // stat the file system and return those numbers even if blobstore does not exist
    if (statfs_path (path, &(meta->fs_bytes_size), &(meta->fs_bytes_available), &(meta->fs_id)) != OK) {
        return ERROR;
    }

    // get the size and params of the blobstore, if it exists
    blobstore * bs = blobstore_open (path,
                                     0, // any size
                                     0, // no flags = do not create it
                                     BLOBSTORE_FORMAT_ANY,
                                     BLOBSTORE_REVOCATION_ANY,
                                     BLOBSTORE_SNAPSHOT_ANY);
    if (bs == NULL)
        return OK;

    blobstore_stat (bs, meta);
    blobstore_close (bs);

    return OK;
}
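// Illustrative sketch (not from the original source): one way a caller might use
// stat_blobstore() above to report disk usage for the "work" and "cache" stores.
// The helper name log_backing_store_usage, the log levels, and the MB conversion
// are assumptions made for this example; only the fs_bytes_size/fs_bytes_available
// fields of blobstore_meta are taken from the code above.
static void log_backing_store_usage (const char * conf_instances_path)
{
    blobstore_meta meta;
    const char * names [] = { "work", "cache" };

    for (int i = 0; i < 2; i++) {
        if (stat_blobstore (conf_instances_path, names [i], &meta) != OK) {
            logprintfl (EUCAWARN, "failed to stat the '%s' blobstore\n", names [i]);
            continue;
        }
        // fs_bytes_* describe the file system hosting the store and are filled in
        // even when the blobstore itself has not been created yet
        logprintfl (EUCAINFO, "'%s' store: %llu of %llu MB available on its file system\n",
                    names [i],
                    meta.fs_bytes_available / 1048576ULL,
                    meta.fs_bytes_size / 1048576ULL);
    }
}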
int init_backing_store (const char * conf_instances_path, unsigned int conf_work_size_mb, unsigned int conf_cache_size_mb)
{
    logprintfl (EUCAINFO, "initializing backing store...\n");

    if (conf_instances_path == NULL) {
        logprintfl (EUCAERROR, "error: INSTANCE_PATH not specified\n");
        return ERROR;
    }
    safe_strncpy (instances_path, conf_instances_path, sizeof (instances_path));
    if (check_directory (instances_path)) {
        logprintfl (EUCAERROR, "error: INSTANCE_PATH (%s) does not exist!\n", instances_path);
        return ERROR;
    }

    char cache_path [MAX_PATH];
    snprintf (cache_path, sizeof (cache_path), "%s/cache", instances_path);
    if (ensure_directories_exist (cache_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return ERROR;

    char work_path [MAX_PATH];
    snprintf (work_path, sizeof (work_path), "%s/work", instances_path);
    if (ensure_directories_exist (work_path, 0, NULL, NULL, BACKING_DIRECTORY_PERM) == -1)
        return ERROR;

    unsigned long long cache_limit_blocks = conf_cache_size_mb * 2048; // convert MB to blocks
    unsigned long long work_limit_blocks  = conf_work_size_mb * 2048;
    if (work_limit_blocks == 0) { // we take 0 as unlimited
        work_limit_blocks = ULLONG_MAX;
    }

    // by default we let blobstore pick the snapshot policy, which
    // will use device mapper if available, which is faster than copying
    blobstore_snapshot_t snapshot_policy = BLOBSTORE_SNAPSHOT_ANY;
    if (nc_state.disable_snapshots) {
        logprintfl (EUCAINFO, "if allocating storage, will avoid using snapshots\n");
        snapshot_policy = BLOBSTORE_SNAPSHOT_NONE;
    }

    blobstore_set_error_function (&bs_errors);

    if (cache_limit_blocks) {
        cache_bs = blobstore_open (cache_path, cache_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, snapshot_policy);
        if (cache_bs == NULL) {
            logprintfl (EUCAERROR, "ERROR: failed to open/create cache blobstore: %s\n", blobstore_get_error_str (blobstore_get_error ()));
            return ERROR;
        }
    }

    work_bs = blobstore_open (work_path, work_limit_blocks, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, snapshot_policy);
    if (work_bs == NULL) {
        logprintfl (EUCAERROR, "ERROR: failed to open/create work blobstore: %s\n", blobstore_get_error_str (blobstore_get_error ()));
        logprintfl (EUCAERROR, "ERROR: %s\n", blobstore_get_last_trace ());
        blobstore_close (cache_bs);
        return ERROR;
    }

    // set the initial value of the semaphore to the number of
    // disk-intensive operations that can run in parallel on this node
    if (nc_state.concurrent_disk_ops && (disk_sem = sem_alloc (nc_state.concurrent_disk_ops, "mutex")) == NULL) {
        logprintfl (EUCAERROR, "failed to create and initialize disk semaphore\n");
        return ERROR;
    }

    return OK;
}
static void stat_blobstore (const char * conf_instances_path, const char * name, blobstore_meta * meta)
{
    bzero (meta, sizeof (blobstore_meta));
    char path [MAX_PATH];
    snprintf (path, sizeof (path), "%s/%s", conf_instances_path, name);

    blobstore * bs = blobstore_open (path,
                                     0, // any size
                                     0, // no flags = do not create it
                                     BLOBSTORE_FORMAT_ANY,
                                     BLOBSTORE_REVOCATION_ANY,
                                     BLOBSTORE_SNAPSHOT_ANY);
    if (bs == NULL)
        return;

    blobstore_stat (bs, meta);
    blobstore_close (bs);
}
int check_backing_store (bunchOfInstances ** global_instances)
{
    instances = global_instances;

    if (work_bs) {
        if (blobstore_fsck (work_bs, stale_blob_examiner)) {
            logprintfl (EUCAERROR, "ERROR: work directory failed integrity check: %s\n", blobstore_get_error_str (blobstore_get_error ()));
            blobstore_close (cache_bs);
            return ERROR;
        }
    }
    if (cache_bs) {
        if (blobstore_fsck (cache_bs, NULL)) { // TODO: verify checksums?
            logprintfl (EUCAERROR, "ERROR: cache failed integrity check: %s\n", blobstore_get_error_str (blobstore_get_error ()));
            return ERROR;
        }
    }
    return OK;
}
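// Illustrative sketch (not from the original source): the call order that the two
// functions above imply for node-controller startup. The wrapper name and the
// parameter names (conf_path, work_mb, cache_mb, global_instances) are placeholders
// for this example.
static int setup_backing_store_example (const char * conf_path,
                                        unsigned int work_mb, unsigned int cache_mb,
                                        bunchOfInstances ** global_instances)
{
    // open or create the work and cache blobstores under conf_path
    if (init_backing_store (conf_path, work_mb, cache_mb) != OK)
        return ERROR;

    // fsck both stores; stale_blob_examiner purges work blobs that no longer
    // belong to a known instance
    if (check_backing_store (global_instances) != OK)
        return ERROR;

    return OK;
}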
//!
//! Main entry point of the application
//!
//! @param[in] argc the number of parameter passed on the command line
//! @param[in] argv the list of arguments
//!
//! @return EUCA_OK on success or EUCA_ERROR on failure.
//!
int main(int argc, char *argv[])
{
    int i = 0;
    int ret = EUCA_OK;
    int nparams = 0;
    int ncmds = 0;
    char *eq = NULL;
    char *key = NULL;
    char *val = NULL;
    char euca_root[] = "";
    char argv_str[4096] = "";
    char *cmd_name = NULL;
    char pid_file[EUCA_MAX_PATH] = "";
    FILE *fp = NULL;
    pid_t pid = 0;
    artifact *root = NULL;
    blobstore *work_bs = NULL;
    blobstore *cache_bs = NULL;
    imager_param *cmd_params = NULL;

    log_fp_set(stderr);         // imager logs to stderr so image data can be piped to stdout
    set_debug(print_debug);

    // initialize globals
    artifacts_map = map_create(10);

    // use $EUCALYPTUS env var if available
    euca_home = getenv(EUCALYPTUS_ENV_VAR_NAME);
    if (!euca_home) {
        euca_home = euca_root;
    }

    // save the command line into a buffer so it's easier to rerun it by hand
    argv_str[0] = '\0';
    for (i = 0; i < argc; i++) {
        strncat(argv_str, "\"", sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, argv[i], sizeof(argv_str) - strlen(argv_str) - 1);
        strncat(argv_str, "\" ", sizeof(argv_str) - strlen(argv_str) - 1);
    }

    // initialize dependencies
    if (vmdk_init() == EUCA_OK) {
        vddk_available = TRUE;
    }

    // parse command-line parameters
    while (*(++argv)) {
        eq = strstr(*argv, "=");    // all params have '='s
        if (eq == NULL) {           // it's a command
            // process previous command, if any
            if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)
                ncmds++;            // increment only if there was a previous command

            if (ncmds + 1 > MAX_REQS)
                err("too many commands (max is %d)", MAX_REQS);

            cmd_name = *argv;
            cmd_params = NULL;
            nparams = 0;
        } else {                    // this is a parameter
            if (strlen(eq) == 1)
                usage("parameters must have non-empty values");
            *eq = '\0';             // split key from value
            if (strlen(*argv) == 1)
                usage("parameters must have non-empty names");

            key = *argv;
            val = eq + 1;
            if (key == NULL || val == NULL)
                usage("syntax error in parameters");

            if (key[0] == '-')
                key++;              // skip '-' if any
            if (key[0] == '-')
                key++;              // skip second '-' if any

            if (cmd_name == NULL) { // without a preceding command => global parameter
                set_global_parameter(key, val);
                continue;
            }

            if (cmd_params == NULL) {
                cmd_params = calloc(MAX_PARAMS + 1, sizeof(imager_param));  // +1 for terminating NULL
                if (!cmd_params)
                    err("calloc failed");
            }

            if (nparams + 1 > MAX_PARAMS)
                err("too many parameters (max is %d)", MAX_PARAMS);
            cmd_params[nparams].key = key;
            cmd_params[nparams].val = val;
            nparams++;
        }
    }

    if (validate_cmd(ncmds, cmd_name, cmd_params, *argv) != NULL)   // validate last command
        ncmds++;

    LOGINFO("verified all parameters for %d command(s)\n", ncmds);
    if (print_argv) {
        LOGDEBUG("argv[]: %s\n", argv_str);
    }

    // record PID, which may be used by VB to kill the imager process (e.g., in cancelBundling)
    pid = getpid();
    sprintf(pid_file, "%s/imager.pid", get_work_dir());
    if ((fp = fopen(pid_file, "w")) == NULL) {
        err("could not create pid file");
    } else {
        fprintf(fp, "%d", pid);
        fclose(fp);
    }

    // invoke the requirements checkers in the same order as on command line,
    // constructing the artifact tree originating at 'root'
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->requirements != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            if ((root = reqs[i].cmd->requirements(&reqs[i], root)) == NULL)  // pass results of earlier checkers to later checkers
                err("failed while verifying requirements");
        }
    }
    // it is OK for root to be NULL at this point
    // see if work blobstore will be needed at any stage
    // and open or create the work blobstore
    if (root && tree_uses_blobstore(root)) {
        // set the function that will catch blobstore errors
        blobstore_set_error_function(&bs_errors);
        if (ensure_directories_exist(get_work_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create work directory %s", get_work_dir());

        work_bs = blobstore_open(get_work_dir(), get_work_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_FILES, BLOBSTORE_REVOCATION_NONE, BLOBSTORE_SNAPSHOT_ANY);
        if (work_bs == NULL) {
            err("failed to open work blobstore: %s", blobstore_get_error_str(blobstore_get_error()));
        }
        // no point in fscking the work blobstore as it was just created
    }

    // see if cache blobstore will be needed at any stage
    if (root && tree_uses_cache(root)) {
        if (ensure_directories_exist(get_cache_dir(), 0, NULL, NULL, BLOBSTORE_DIRECTORY_PERM) == -1)
            err("failed to open or create cache directory %s", get_cache_dir());

        cache_bs = blobstore_open(get_cache_dir(), get_cache_limit() / 512, BLOBSTORE_FLAG_CREAT, BLOBSTORE_FORMAT_DIRECTORY, BLOBSTORE_REVOCATION_LRU, BLOBSTORE_SNAPSHOT_ANY);
        if (cache_bs == NULL) {
            blobstore_close(work_bs);
            err("failed to open cache blobstore: %s\n", blobstore_get_error_str(blobstore_get_error()));
        }

        if (blobstore_fsck(cache_bs, NULL))     //! @TODO: verify checksums?
            err("cache blobstore failed integrity check: %s", blobstore_get_error_str(blobstore_get_error()));

        if (stat_blobstore(get_cache_dir(), cache_bs))
            err("blobstore is unreadable");
    }

    // implement the artifact tree
    ret = EUCA_OK;
    if (root) {
        art_set_instanceId("imager");   // for logging
        ret = art_implement_tree(root, work_bs, cache_bs, NULL, INSTANCE_PREP_TIMEOUT_USEC);    // do all the work!
    }

    // invoke the cleaners for each command to tidy up disk space and memory allocations
    for (i = 0; i < ncmds; i++) {
        if (reqs[i].cmd->cleanup != NULL) {
            art_set_instanceId(reqs[i].cmd->name);  // for logging
            reqs[i].cmd->cleanup(&reqs[i], (i == (ncmds - 1)) ? (TRUE) : (FALSE));
        }
    }

    // free the artifact tree
    if (root) {
        if (tree_uses_blobstore(root)) {
            if (blobstore_fsck(work_bs, stale_blob_examiner)) { // will remove all blobs
                LOGWARN("failed to clean up work space: %s\n", blobstore_get_error_str(blobstore_get_error()));
            }
        }
        art_free(root);
    }

    clean_work_dir(work_bs);

    // indicate completion
    LOGINFO("imager done (exit code=%d)\n", ret);
    exit(ret);
}
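// Illustrative sketch (not from the original source): the command-line grammar that the
// parsing loop in main() implements. An argument without '=' names a command; key=value
// pairs before the first command are global parameters (handled by set_global_parameter),
// and key=value pairs after a command are collected into that command's imager_param list.
// The specific command and parameter names below are hypothetical placeholders, not a
// statement of which commands the imager supports.
//
//   euca_imager debug=yes \
//               prepare boot=true \
//               convert in=disk.img out=disk.vmdk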