/**
 * Build the supermin appliance from C<supermin_path> into
 * F<$TMPDIR/.guestfs-$UID>.
 *
 * Returns: C<0> = built or C<-1> = error (aborts launch).
 */
static int
build_supermin_appliance (guestfs_h *g,
                          const char *supermin_path,
                          char **kernel, char **initrd, char **appliance)
{
  CLEANUP_FREE char *cache_dir = NULL;
  CLEANUP_FREE char *lock_path = NULL;
  CLEANUP_FREE char *appliance_dir = NULL;

  cache_dir = guestfs_int_lazy_make_supermin_appliance_dir (g);
  if (cache_dir == NULL)
    return -1;

  lock_path = safe_asprintf (g, "%s/lock", cache_dir);
  appliance_dir = safe_asprintf (g, "%s/appliance.d", cache_dir);

  debug (g, "begin building supermin appliance");

  /* (Re)build the appliance if it needs to be built. */
  debug (g, "run supermin");
  if (run_supermin_build (g, lock_path, appliance_dir, supermin_path) == -1)
    return -1;
  debug (g, "finished building supermin appliance");

  /* Hand back the filenames of the appliance components. */
  *kernel = safe_asprintf (g, "%s/kernel", appliance_dir);
  *initrd = safe_asprintf (g, "%s/initrd", appliance_dir);
  *appliance = safe_asprintf (g, "%s/root", appliance_dir);

  /* Update the timestamps so the files are not reaped (they live in
   * /var/tmp).
   */
  (void) utimes (*kernel, NULL);
  (void) utimes (*initrd, NULL);

  /* Skipping this for the "uml" backend is a big hack.  UML records
   * the mtime of the original backing file (in this case, the
   * appliance) in the COW file and checks it when adding the disk to
   * the VM.  With multiple threads running, touching the appliance
   * here would disturb the mtime and make UML give an error.
   *
   * This hack can go away as soon as UML fixes the ubdN=cow,original
   * parsing bug, because then we will not need to run uml_mkcow
   * separately, so there is no possible race.
   *
   * XXX
   */
  if (STRNEQ (g->backend, "uml"))
    (void) utimes (*appliance, NULL);

  return 0;
}
/**
 * Test that the qemu binary (or wrapper) runs, and do C<qemu -help>
 * so we know the version of qemu and what options this qemu
 * supports, and C<qemu -device ?> so we know what devices are
 * available.
 *
 * The version number of qemu (from the C<-help> output) is saved in
 * C<&qemu_version>.
 *
 * This caches the results in the cachedir so that as long as the qemu
 * binary does not change, calling this is effectively free.
 *
 * Returns the allocated qemu_data (caller frees with
 * guestfs_int_free_qemu_data), or NULL on error.
 */
struct qemu_data *
guestfs_int_test_qemu (guestfs_h *g, struct version *qemu_version)
{
  struct qemu_data *data;
  struct stat statbuf;
  CLEANUP_FREE char *cachedir = NULL, *qemu_stat_filename = NULL,
    *qemu_help_filename = NULL, *qemu_devices_filename = NULL;
  FILE *fp;
  int generation;
  uint64_t prev_size, prev_mtime;

  if (stat (g->hv, &statbuf) == -1) {
    perrorf (g, "stat: %s", g->hv);
    return NULL;
  }

  cachedir = guestfs_int_lazy_make_supermin_appliance_dir (g);
  if (cachedir == NULL)
    return NULL;

  qemu_stat_filename = safe_asprintf (g, "%s/qemu.stat", cachedir);
  qemu_help_filename = safe_asprintf (g, "%s/qemu.help", cachedir);
  qemu_devices_filename = safe_asprintf (g, "%s/qemu.devices", cachedir);

  /* Did we previously test the same version of qemu?  The cached
   * results are keyed on the binary's size and mtime plus a
   * generation number (MEMO_GENERATION) which is bumped when the
   * cache format changes.
   */
  debug (g, "checking for previously cached test results of %s, in %s",
         g->hv, cachedir);

  fp = fopen (qemu_stat_filename, "r");
  if (fp == NULL)
    goto do_test;
  if (fscanf (fp, "%d %" SCNu64 " %" SCNu64,
              &generation, &prev_size, &prev_mtime) != 3) {
    fclose (fp);
    goto do_test;
  }
  fclose (fp);

  if (generation == MEMO_GENERATION &&
      (uint64_t) statbuf.st_size == prev_size &&
      (uint64_t) statbuf.st_mtime == prev_mtime) {
    /* Same binary as before, so read the previously cached qemu -help
     * and qemu -devices ? output.
     */
    if (access (qemu_help_filename, R_OK) == -1 ||
        access (qemu_devices_filename, R_OK) == -1)
      goto do_test;

    debug (g, "loading previously cached test results");

    data = safe_calloc (g, 1, sizeof *data);

    if (guestfs_int_read_whole_file (g, qemu_help_filename,
                                     &data->qemu_help, NULL) == -1) {
      guestfs_int_free_qemu_data (data);
      return NULL;
    }
    parse_qemu_version (g, data->qemu_help, qemu_version);

    if (guestfs_int_read_whole_file (g, qemu_devices_filename,
                                     &data->qemu_devices, NULL) == -1) {
      guestfs_int_free_qemu_data (data);
      return NULL;
    }

    return data;
  }

 do_test:
  data = safe_calloc (g, 1, sizeof *data);

  if (test_qemu (g, data, qemu_version) == -1) {
    guestfs_int_free_qemu_data (data);
    return NULL;
  }

  /* Now memoize the qemu output in the cache directory. */
  debug (g, "saving test results");

  fp = fopen (qemu_help_filename, "w");
  if (fp == NULL) {
  help_error:
    perrorf (g, "%s", qemu_help_filename);
    if (fp != NULL) fclose (fp);
    guestfs_int_free_qemu_data (data);
    return NULL;
  }
  if (fprintf (fp, "%s", data->qemu_help) == -1)
    goto help_error;
  if (fclose (fp) == -1) {
    /* POSIX: fclose deallocates the stream even when it fails, so
     * clear fp to stop the error path from closing it a second time.
     */
    fp = NULL;
    goto help_error;
  }

  fp = fopen (qemu_devices_filename, "w");
  if (fp == NULL) {
  devices_error:
    perrorf (g, "%s", qemu_devices_filename);
    if (fp != NULL) fclose (fp);
    guestfs_int_free_qemu_data (data);
    return NULL;
  }
  if (fprintf (fp, "%s", data->qemu_devices) == -1)
    goto devices_error;
  if (fclose (fp) == -1) {
    /* See above: avoid double fclose after a failed fclose. */
    fp = NULL;
    goto devices_error;
  }

  /* Write the qemu.stat file last so that its presence indicates that
   * the qemu.help and qemu.devices files ought to exist.
   */
  fp = fopen (qemu_stat_filename, "w");
  if (fp == NULL) {
  stat_error:
    perrorf (g, "%s", qemu_stat_filename);
    if (fp != NULL) fclose (fp);
    guestfs_int_free_qemu_data (data);
    return NULL;
  }
  /* The path to qemu is stored for information only, it is not
   * used when we parse the file.
   */
  if (fprintf (fp, "%d %" PRIu64 " %" PRIu64 " %s\n",
               MEMO_GENERATION,
               (uint64_t) statbuf.st_size,
               (uint64_t) statbuf.st_mtime,
               g->hv) == -1)
    goto stat_error;
  if (fclose (fp) == -1) {
    /* See above: avoid double fclose after a failed fclose. */
    fp = NULL;
    goto stat_error;
  }

  return data;
}