Example No. 1
static int perform_curl_io_and_complete(int *left)
{
	CURLMsg *msg;
	long ret;
	CURLMcode curlm_ret;
	CURLcode curl_ret;

	curlm_ret = curl_multi_perform(mcurl, left);
	if (curlm_ret != CURLM_OK) {
		return -1;
	}

	while (true) {
		CURL *handle;
		struct file *file;

		msg = curl_multi_info_read(mcurl, left);
		if (!msg) {
			break;
		}
		if (msg->msg != CURLMSG_DONE) {
			continue;
		}

		handle = msg->easy_handle;
		ret = 404;
		curl_ret = curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &ret);
		if (curl_ret != CURLE_OK) {
			continue;
		}

		curl_ret = curl_easy_getinfo(handle, CURLINFO_PRIVATE, (char **)&file);
		if (curl_ret != CURLE_OK) {
			curl_easy_cleanup(handle);
			continue;
		}

		/* The easy handle may have an error set, even if the server returns
		 * HTTP 200, so retry the download for this case. */
		if (ret == 200 && msg->data.result != CURLE_OK) {
			printf("Error for %s download: %s\n", file->hash,
			       curl_easy_strerror(msg->data.result));
			failed = list_prepend_data(failed, file);
		} else if (ret == 200) {
			/* Only when both the web server and CURL report success
			 * do we proceed to uncompress. */
			if (untar_full_download(file)) {
				printf("Error for %s tarfile extraction, (check free space for %s?)\n",
				       file->hash, state_dir);
				failed = list_prepend_data(failed, file);
			}
		} else if (ret == 0) {
			/* When using the FILE:// protocol, 0 indicates success.
			 * Otherwise, it means the web server hasn't responded yet.
			 */
			if (local_download) {
				if (untar_full_download(file)) {
					printf("Error for %s tarfile extraction, (check free space for %s?)\n",
					       file->hash, state_dir);
					failed = list_prepend_data(failed, file);
				}
			} else {
				printf("Error for %s download: No response received\n",
				       file->hash);
				failed = list_prepend_data(failed, file);
			}
		} else {
			printf("Error for %s download: Received %ld response\n", file->hash, ret);
			failed = list_prepend_data(failed, file);

			unlink_all_staged_content(file);
		}
		if (file->staging) {
			free(file->staging);
			file->staging = NULL;
		}

		/* NOTE: Intentionally no removal of the file from the hashmap.
		 * All needed files must be determined and queued in one
		 * complete preparation phase.  Once all needed files are
		 * present, they can be staged.  Otherwise a complex data
		 * structure and retries are needed to ensure that only one
		 * download of a file happens fully to success AND that a
		 * HASH.tar is uncompressed to HASH and staged to the
		 * _multiple_ filenames with that hash. */

		curl_multi_remove_handle(mcurl, handle);
		curl_easy_cleanup(handle);
		file->curl = NULL;
	}

	curlm_ret = curl_multi_perform(mcurl, left);
	if (curlm_ret != CURLM_OK) {
		return -1;
	}

	return 0;
}
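
For context, a completion routine like this is normally driven from an outer loop that waits for socket activity on the multi handle and calls it again until no transfers remain. The sketch below is illustrative only: it assumes the same global mcurl multi handle, and drive_downloads is a hypothetical name that does not appear in the original code.

/* Hypothetical driver loop (sketch): wait for activity on the multi
 * handle, then let perform_curl_io_and_complete() reap finished
 * transfers until none are left running. Assumes the global mcurl. */
static int drive_downloads(void)
{
	int left = 1;

	while (left > 0) {
		int numfds = 0;

		/* block for up to 500 ms waiting for socket activity */
		if (curl_multi_wait(mcurl, NULL, 0, 500, &numfds) != CURLM_OK) {
			return -1;
		}
		if (perform_curl_io_and_complete(&left) != 0) {
			return -1;
		}
	}

	return 0;
}
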
Example No. 2
/* This function is meant to be called while staging a file to fix any missing/incorrect paths.
 * While staging a file, if its parent directory is missing, this will try to create the path
 * by breaking it into sub-paths and fixing them top down.
 * Here, target_MoM is the consolidated manifest for the version you are trying to update/verify.
 */
int verify_fix_path(char *targetpath, struct manifest *target_MoM)
{
	struct list *path_list = NULL; /* path_list contains the subparts in the path */
	char *path;
	char *tmp = NULL, *target = NULL;
	char *url = NULL;
	struct stat sb;
	int ret = 0;
	struct file *file;
	char *tar_dotfile = NULL;
	struct list *list1 = NULL;

	/* This shouldn't happen */
	if (strcmp(targetpath, "/") == 0) {
		return ret;
	}

	/* Removing trailing '/' from the path */
	path = strdup_or_die(targetpath);
	if (path[strlen(path) - 1] == '/') {
		path[strlen(path) - 1] = '\0';
	}

	/* Break the path down into parts.
	 * e.g. the path /usr/bin/foo will be broken into /usr, /usr/bin and /usr/bin/foo
	 */
	while (strcmp(path, "/") != 0) {
		path_list = list_prepend_data(path_list, strdup_or_die(path));
		tmp = strdup_or_die(dirname(path));
		free_string(&path);
		path = tmp;
	}
	free_string(&path);

	list1 = list_head(path_list);
	while (list1) {
		path = list1->data;
		list1 = list1->next;

		free_string(&target);
		free_string(&tar_dotfile);
		free_string(&url);

		target = mk_full_filename(path_prefix, path);

		/* Search for the file in the manifest, to get the hash for the file */
		file = search_file_in_manifest(target_MoM, path);
		if (file == NULL) {
			fprintf(stderr, "Error: Path %s not found in any of the subscribed manifests"
					"in verify_fix_path for path_prefix %s\n",
				path, path_prefix);
			ret = -1;
			goto end;
		}

		if (file->is_deleted) {
			fprintf(stderr, "Error: Path %s found deleted in verify_fix_path\n", path);
			ret = -1;
			goto end;
		}

		ret = stat(target, &sb);
		if (ret == 0) {
			if (verify_file(file, target)) {
				continue;
			}
			fprintf(stderr, "Hash did not match for path : %s ... fixing\n", path);
		} else if (ret == -1 && errno == ENOENT) {
			fprintf(stderr, "Path %s is missing on the file system ... fixing\n", path);
		} else {
			goto end;
		}
		/* In some circumstances (Docker using layers between updates/bundle adds,
		 * corrupt staging content) we could have content which fails to stage.
		 * In order to avoid this causing failure in verify_fix_path, remove the
		 * staging content before proceeding. This also cleans up in case any prior
		 * download failed in a partial state.
		 */
		unlink_all_staged_content(file);

		string_or_die(&tar_dotfile, "%s/download/.%s.tar", state_dir, file->hash);

		string_or_die(&url, "%s/%i/files/%s.tar", content_url, file->last_change, file->hash);
		ret = swupd_curl_get_file(url, tar_dotfile);

		if (ret != 0) {
			fprintf(stderr, "Error: Failed to download file %s in verify_fix_path\n", file->filename);
			unlink(tar_dotfile);
			goto end;
		}
		if (untar_full_download(file) != 0) {
			fprintf(stderr, "Error: Failed to untar file %s\n", file->filename);
			ret = -1;
			goto end;
		}

		ret = do_staging(file, target_MoM);
		if (ret != 0) {
			fprintf(stderr, "Error: Path %s failed to stage in verify_fix_path\n", path);
			goto end;
		}
	}
end:
	free_string(&target);
	free_string(&tar_dotfile);
	free_string(&url);
	list_free_list_and_data(path_list, free_path_data);
	return ret;
}
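
As a usage illustration, a caller would typically pass verify_fix_path() the parent directory of a path that failed to stage, then retry the operation. The sketch below assumes the do_staging(), strdup_or_die() and free_string() helpers already shown in these examples; stage_with_path_fix is a hypothetical wrapper, not part of the original code.

/* Hypothetical caller (sketch): if staging fails, try to repair the
 * parent directory chain with verify_fix_path() and retry once. */
static int stage_with_path_fix(struct file *file, struct manifest *mom)
{
	int ret = do_staging(file, mom);

	if (ret != 0) {
		/* dirname() may modify its argument, so work on a copy */
		char *dir = strdup_or_die(file->filename);

		if (verify_fix_path(dirname(dir), mom) == 0) {
			ret = do_staging(file, mom);
		}
		free_string(&dir);
	}

	return ret;
}
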
Example No. 3
/* This function will break if the same HASH.tar full file is downloaded
 * multiple times in parallel. */
int untar_full_download(void *data)
{
	struct file *file = data;
	char *tarfile;
	char *tar_dotfile;
	char *targetfile;
	struct stat stat;
	int err;
	char *tarcommand;

	string_or_die(&tar_dotfile, "%s/download/.%s.tar", state_dir, file->hash);
	string_or_die(&tarfile, "%s/download/%s.tar", state_dir, file->hash);
	string_or_die(&targetfile, "%s/staged/%s", state_dir, file->hash);

	/* If a valid target file already exists, we're done.
	 * NOTE: this should NEVER happen given the checking that happens
	 *       ahead of queueing a download.  But... */
	if (lstat(targetfile, &stat) == 0) {
		if (verify_file(file, targetfile)) {
			unlink(tar_dotfile);
			unlink(tarfile);
			free(tar_dotfile);
			free(tarfile);
			free(targetfile);
			return 0;
		} else {
			unlink(tarfile);
			unlink(targetfile);
		}
	} else if (lstat(tarfile, &stat) == 0) {
		/* remove tar file from possible past failure */
		unlink(tarfile);
	}

	err = rename(tar_dotfile, tarfile);
	if (err) {
		free(tar_dotfile);
		goto exit;
	}
	free(tar_dotfile);

	err = check_tarfile_content(file, tarfile);
	if (err) {
		goto exit;
	}

	/* modern tar will automatically determine the compression type used */
	string_or_die(&tarcommand, TAR_COMMAND " -C %s/staged/ " TAR_PERM_ATTR_ARGS " -xf %s 2> /dev/null",
		      state_dir, tarfile);

	err = system(tarcommand);
	if (WIFEXITED(err)) {
		err = WEXITSTATUS(err);
	}
	free(tarcommand);
	if (err) {
		printf("ignoring tar extract failure for fullfile %s.tar (ret %d)\n",
		       file->hash, err);
		goto exit;
		/* FIXME: can we respond meaningfully to tar error codes?
		 * Symlink untars may have perm/xattr complaints and a non-zero
		 * tar return, but the symlink was (probably?) untarred OK.
		 *
		 * We also get complaints on some new regular files?
		 *
		 * Either way we verify the hash later, so on an error there,
		 * something could try to recover? */
	} else {
		/* Only unlink when tar succeeded, so we can examine the tar file
		 * in the failure case. */
		unlink(tarfile);
	}

	err = lstat(targetfile, &stat);
	if (!err && !verify_file(file, targetfile)) {
		/* Download was successful but the hash was bad. This is fatal. */
		printf("Error: File content hash mismatch for %s (bad server data?)\n", targetfile);
		exit(EXIT_FAILURE);
	}

exit:
	free(tarfile);
	free(targetfile);
	if (err) {
		unlink_all_staged_content(file);
	}
	return err;
}
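
check_tarfile_content() is called above but its body is not shown. A minimal stand-in could list the archive with tar -tf and require every entry to live under the expected HASH name. The sketch below works under that assumption; check_tarfile_content_sketch and its exact policy are illustrative, not the project's implementation.

/* Hypothetical sketch of an archive content check: every entry in
 * HASH.tar must start with the expected hash name. */
static int check_tarfile_content_sketch(struct file *file, const char *tarfilename)
{
	char *command;
	char line[PATH_MAX];
	FILE *tar;
	int err = 0;
	size_t hash_len = strlen(file->hash);

	string_or_die(&command, TAR_COMMAND " -tf %s 2> /dev/null", tarfilename);
	tar = popen(command, "r");
	if (!tar) {
		free(command);
		return -1;
	}

	while (fgets(line, sizeof(line), tar)) {
		/* reject entries that do not live under the expected hash */
		if (strncmp(line, file->hash, hash_len) != 0) {
			err = -1;
			break;
		}
	}

	/* pclose() reports the tar exit status; treat any failure as an error */
	if (pclose(tar) != 0 && !err) {
		err = -1;
	}
	free(command);
	return err;
}
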
Example No. 4
/* This function will break if the same HASH.tar full file is downloaded
 * multiple times in parallel. */
int untar_full_download(void *data)
{
	struct file *file = data;
	char *tarfile;
	char *tar_dotfile;
	char *targetfile;
	struct stat stat;
	int err;

	string_or_die(&tar_dotfile, "%s/download/.%s.tar", state_dir, file->hash);
	string_or_die(&tarfile, "%s/download/%s.tar", state_dir, file->hash);
	string_or_die(&targetfile, "%s/staged/%s", state_dir, file->hash);

	/* If a valid target file already exists, we're done.
	 * NOTE: this should NEVER happen given the checking that happens
	 *       ahead of queueing a download.  But... */
	if (lstat(targetfile, &stat) == 0) {
		if (verify_file(file, targetfile)) {
			unlink(tar_dotfile);
			unlink(tarfile);
			free_string(&tar_dotfile);
			free_string(&tarfile);
			free_string(&targetfile);
			return 0;
		} else {
			unlink(tarfile);
			unlink(targetfile);
		}
	} else if (lstat(tarfile, &stat) == 0) {
		/* remove tar file from possible past failure */
		unlink(tarfile);
	}

	err = rename(tar_dotfile, tarfile);
	if (err) {
		free_string(&tar_dotfile);
		goto exit;
	}
	free_string(&tar_dotfile);

	err = check_tarfile_content(file, tarfile);
	if (err) {
		goto exit;
	}

	/* modern tar will automatically determine the compression type used */
	char *outputdir;
	string_or_die(&outputdir, "%s/staged", state_dir);
	err = extract_to(tarfile, outputdir);
	free_string(&outputdir);
	if (err) {
		fprintf(stderr, "ignoring tar extract failure for fullfile %s.tar (ret %d)\n",
			file->hash, err);
		goto exit;
		/* TODO: respond to ARCHIVE_RETRY error codes
		 * libarchive returns ARCHIVE_RETRY when tar extraction fails but the
		 * operation is retry-able. We need to determine if it is worth our time
		 * to retry in these situations. */
	} else {
		/* Only unlink when tar succeeded, so we can examine the tar file
		 * in the failure case. */
		unlink(tarfile);
	}

	err = lstat(targetfile, &stat);
	if (!err && !verify_file(file, targetfile)) {
		/* Download was successful but the hash was bad. This is fatal. */
		fprintf(stderr, "Error: File content hash mismatch for %s (bad server data?)\n", targetfile);
		exit(EXIT_FAILURE);
	}

exit:
	free_string(&tarfile);
	free_string(&targetfile);
	if (err) {
		unlink_all_staged_content(file);
	}
	return err;
}
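
extract_to() above replaces the earlier system() tar invocation, but its body is not shown either. For reference only, a libarchive-based extraction routine might look roughly like the following sketch; extract_to_sketch, the option flags, and the 10240 block size are assumptions for illustration, not the project's actual implementation.

/* Hypothetical libarchive-based extraction (sketch only). */
#include <archive.h>
#include <archive_entry.h>
#include <limits.h>
#include <stdio.h>

static int extract_to_sketch(const char *tarfile, const char *outputdir)
{
	struct archive *a = archive_read_new();
	struct archive *ext = archive_write_disk_new();
	struct archive_entry *entry;
	char path[PATH_MAX];
	int r, ret = 0;

	archive_read_support_format_tar(a);
	archive_read_support_filter_all(a);
	archive_write_disk_set_options(ext, ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_PERM);

	if (archive_read_open_filename(a, tarfile, 10240) != ARCHIVE_OK) {
		ret = -1;
		goto out;
	}

	while ((r = archive_read_next_header(a, &entry)) == ARCHIVE_OK) {
		/* re-root each entry under outputdir instead of chdir()ing */
		snprintf(path, sizeof(path), "%s/%s", outputdir, archive_entry_pathname(entry));
		archive_entry_set_pathname(entry, path);
		if (archive_read_extract2(a, entry, ext) != ARCHIVE_OK) {
			ret = -1;
			break;
		}
	}
	if (r != ARCHIVE_EOF && ret == 0) {
		ret = -1;
	}
out:
	archive_read_free(a);
	archive_write_free(ext);
	return ret;
}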