bool OdinPatcher::Impl::openInputArchive() { assert(aInput == nullptr); aInput = archive_read_new(); // TODO: Eventually support tar within a zip since many people distribute // stock firmware packages in this format //archive_read_support_format_zip(aInput); archive_read_support_format_tar(aInput); archive_read_support_filter_gzip(aInput); archive_read_support_filter_xz(aInput); // Our callbacks use io::File, which supports LFS on Android. Also allows // progress info by counting number of bytes read. int ret = archive_read_open2(aInput, this, &Impl::laOpenCb, &Impl::laReadCb, &Impl::laSkipCb, &Impl::laCloseCb); if (ret != ARCHIVE_OK) { LOGW("libarchive: Failed to open for reading: %s", archive_error_string(aInput)); archive_read_free(aInput); aInput = nullptr; error = ErrorCode::ArchiveReadOpenError; return false; } return true; }
static int append_archive_tarsnap(struct bsdtar *bsdtar, struct archive *a, const char *tapename) { struct archive *ina; void * cookie; int rc; ina = archive_read_new(); archive_read_support_format_tar(ina); archive_read_support_compression_none(ina); cookie = archive_read_open_multitape(ina, bsdtar->machinenum, tapename); if (cookie == NULL) { bsdtar_warnc(bsdtar, 0, "%s", archive_error_string(ina)); bsdtar->return_value = 1; return (0); } rc = append_archive(bsdtar, a, ina, cookie); /* Handle errors which haven't already been reported. */ if (archive_errno(ina)) { bsdtar_warnc(bsdtar, 0, "Error reading archive %s: %s", tapename, archive_error_string(ina)); bsdtar->return_value = 1; } archive_read_finish(ina); return (rc); }
static void extract(const char *filename, int do_extract, int flags) { struct archive *a; struct archive *ext; struct archive_entry *entry; int r; a = archive_read_new(); ext = archive_write_disk_new(); archive_write_disk_set_options(ext, flags); /* * Note: archive_write_disk_set_standard_lookup() is useful * here, but it requires library routines that can add 500k or * more to a static executable. */ archive_read_support_format_tar(a); /* * On my system, enabling other archive formats adds 20k-30k * each. Enabling gzip decompression adds about 20k. * Enabling bzip2 is more expensive because the libbz2 library * isn't very well factored. */ if (filename != NULL && strcmp(filename, "-") == 0) filename = NULL; if ((r = archive_read_open_filename(a, filename, 10240))) fail("archive_read_open_filename()", archive_error_string(a), r); for (;;) { r = archive_read_next_header(a, &entry); if (r == ARCHIVE_EOF) break; if (r != ARCHIVE_OK) fail("archive_read_next_header()", archive_error_string(a), 1); if (verbose && do_extract) msg("x "); if (verbose || !do_extract) msg(archive_entry_pathname(entry)); if (do_extract) { r = archive_write_header(ext, entry); if (r != ARCHIVE_OK) warn("archive_write_header()", archive_error_string(ext)); else { copy_data(a, ext); r = archive_write_finish_entry(ext); if (r != ARCHIVE_OK) fail("archive_write_finish_entry()", archive_error_string(ext), 1); } } if (verbose || !do_extract) msg("\n"); } archive_read_close(a); archive_read_free(a); exit(0); }
void EnableArchiveFormats(struct archive *p_archive) { // archive_read_support_filter_bzip2(p_archive); // archive_read_support_filter_compress(p_archive); // archive_read_support_filter_gzip(p_archive); // archive_read_support_filter_grzip(p_archive); // archive_read_support_filter_lrzip(p_archive); // archive_read_support_filter_lzip(p_archive); archive_read_support_filter_lzma(p_archive); archive_read_support_filter_lzop(p_archive); archive_read_support_filter_none(p_archive); archive_read_support_filter_rpm(p_archive); archive_read_support_filter_uu(p_archive); archive_read_support_filter_xz(p_archive); // archive_read_support_format_7zip(p_archive); archive_read_support_format_ar(p_archive); archive_read_support_format_cab(p_archive); archive_read_support_format_cpio(p_archive); archive_read_support_format_gnutar(p_archive); // archive_read_support_format_iso9660(p_archive); archive_read_support_format_lha(p_archive); archive_read_support_format_mtree(p_archive); archive_read_support_format_rar(p_archive); archive_read_support_format_raw(p_archive); archive_read_support_format_tar(p_archive); archive_read_support_format_xar(p_archive); // archive_read_support_format_zip(p_archive); }
/*
 * Create an archive reader over stream `src` plus any sibling volumes, and
 * probe it with libarchive.  Returns a new mp_archive, or NULL on failure
 * (partially constructed state is released via mp_archive_free()).
 */
struct mp_archive *mp_archive_new(struct mp_log *log, struct stream *src,
                                  int flags)
{
    struct mp_archive *mpa = talloc_zero(NULL, struct mp_archive);
    mpa->log = log;
    // libarchive decodes entry names using the thread locale; pin a UTF-8
    // "C" locale so filename handling is deterministic.
    mpa->locale = newlocale(LC_ALL_MASK, "C.UTF-8", (locale_t)0);
    if (!mpa->locale)
        goto err;
    mpa->arch = archive_read_new();
    mpa->primary_src = src;
    if (!mpa->arch)
        goto err;

    // first volume is the primary stream
    if (!add_volume(log, mpa, src, src->url))
        goto err;

    // try to open other volumes
    // NOTE(review): the loop assumes find_volumes() returns a non-NULL,
    // NULL-terminated array -- confirm against its definition.
    char **volumes = find_volumes(src);
    for (int i = 0; volumes[i]; i++) {
        if (!add_volume(log, mpa, NULL, volumes[i])) {
            talloc_free(volumes);
            goto err;
        }
    }
    talloc_free(volumes);

    // Run all libarchive calls under the private locale.
    locale_t oldlocale = uselocale(mpa->locale);

    archive_read_support_format_7zip(mpa->arch);
    archive_read_support_format_iso9660(mpa->arch);
    archive_read_support_format_rar(mpa->arch);
    archive_read_support_format_zip(mpa->arch);
    archive_read_support_filter_bzip2(mpa->arch);
    archive_read_support_filter_gzip(mpa->arch);
    archive_read_support_filter_xz(mpa->arch);
    // tar support is opt-in via the caller's "unsafe" flag.
    if (flags & MP_ARCHIVE_FLAG_UNSAFE) {
        archive_read_support_format_gnutar(mpa->arch);
        archive_read_support_format_tar(mpa->arch);
    }

    // All I/O goes through our stream callbacks; seeking is only offered
    // when the primary stream supports it.
    archive_read_set_read_callback(mpa->arch, read_cb);
    archive_read_set_skip_callback(mpa->arch, skip_cb);
    archive_read_set_switch_callback(mpa->arch, switch_cb);
    archive_read_set_open_callback(mpa->arch, open_cb);
    archive_read_set_close_callback(mpa->arch, close_cb);
    if (mpa->primary_src->seekable)
        archive_read_set_seek_callback(mpa->arch, seek_cb);
    bool fail = archive_read_open1(mpa->arch) < ARCHIVE_OK;
    uselocale(oldlocale);
    if (fail)
        goto err;
    return mpa;

err:
    mp_archive_free(mpa);
    return NULL;
}
static void pack_extract(const char *pack, const char *dbname, const char *dbpath) { struct archive *a = NULL; struct archive_entry *ae = NULL; if (access(pack, F_OK) != 0) return; a = archive_read_new(); archive_read_support_filter_all(a); archive_read_support_format_tar(a); if (archive_read_open_filename(a, pack, 4096) != ARCHIVE_OK) { /* if we can't unpack it it won't be useful for us */ unlink(pack); archive_read_free(a); return; } while (archive_read_next_header(a, &ae) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(ae), dbname) == 0) { archive_entry_set_pathname(ae, dbpath); archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS); break; } } archive_read_free(a); }
/**
 * Extract a tar stream (any supported compression) read from `input` into
 * `extract_folder`, creating parent directories as needed.
 *
 * @param extract_folder destination directory for extracted entries
 * @param input          stream carrying the raw archive bytes
 * @throws ibrcommon::IOException if an output file cannot be created
 */
void TarUtils::read( const ibrcommon::File &extract_folder, std::istream &input )
{
	struct archive *a;
	struct archive_entry *entry;
	int ret, fd;

	a = archive_read_new();
	archive_read_support_filter_all(a);
	archive_read_support_compression_all(a);
	archive_read_support_format_tar(a);

	archive_read_open(a, (void*) &input, &__tar_utils_open_callback, &__tar_utils_read_callback, &__tar_utils_close_callback);

	while ((ret = archive_read_next_header(a, &entry)) == ARCHIVE_OK )
	{
		ibrcommon::File filename = extract_folder.get(archive_entry_pathname(entry));
		ibrcommon::File path = filename.getParent();
		ibrcommon::File::createDirectory(path);

		fd = open(filename.getPath().c_str(), O_CREAT|O_WRONLY, 0600);
		if (fd < 0)
		{
			// BUGFIX: free the archive before throwing (it leaked
			// before) and report the file we failed to open, not
			// its parent directory.
			archive_read_free(a);
			throw ibrcommon::IOException("cannot open file " + filename.getPath());
		}

		archive_read_data_into_fd(a, fd);
		close(fd);
	}

	archive_read_free(a);
}
int extract_archive(char *filename) { struct archive *a; struct archive_entry *entry; int r; size_t size; char buff[BUFFER_SIZE]; FILE *fd; a = archive_read_new(); archive_read_support_compression_gzip(a); archive_read_support_format_tar(a); r = archive_read_open_filename(a, filename, 10240); if (r != ARCHIVE_OK) { return 1; } for (;;) { if ((r = archive_read_next_header(a, &entry))) { if (r != ARCHIVE_OK) { if (r == ARCHIVE_EOF) { return 0; }else{ return 1; } } } fd = fopen(archive_entry_pathname(entry),"wb"); if (fd == NULL) { fprintf(stderr, "problem extracting archive: %s: %s\n", filename, strerror(errno)); return 1; } for (;;) { size = archive_read_data(a, buff, BUFFER_SIZE); if (size < 0) { return 1; } if (size == 0) { break; } fwrite(buff, 1, size, fd); } fclose(fd); } r = archive_read_finish(a); if (r != ARCHIVE_OK) { return 4; } return 0; }
/*
 * Enable every format bidder compiled into libarchive, in an order chosen
 * so cheap bidders run before expensive ones.  Always returns ARCHIVE_OK:
 * the intent is to enable "as much as possible".
 */
int
archive_read_support_format_all(struct archive *a)
{
	archive_check_magic(a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
	    "archive_read_support_format_all");

	/* TODO: It would be nice to compute the ordering
	 * here automatically so that people who enable just
	 * a few formats can still get the benefits.  That
	 * may just require the format registration to include
	 * a "maximum read-ahead" value (anything that uses seek
	 * would be essentially infinite read-ahead).  The core
	 * bid management can then sort the bidders before calling
	 * them.
	 *
	 * If you implement the above, please return the list below
	 * to alphabetic order.
	 */

	/*
	 * These bidders are all pretty cheap; they just examine a
	 * small initial part of the archive.  If one of these bids
	 * high, we can maybe avoid running any of the more expensive
	 * bidders below.
	 */
	archive_read_support_format_ar(a);
	archive_read_support_format_cpio(a);
	archive_read_support_format_empty(a);
	archive_read_support_format_lha(a);
	archive_read_support_format_mtree(a);
	archive_read_support_format_tar(a);
	archive_read_support_format_xar(a);

	/*
	 * Install expensive bidders last.  By doing them last, we
	 * increase the chance that a high bid from someone else will
	 * make it unnecessary for these to do anything at all.
	 */
	/* These three have potentially large look-ahead. */
	archive_read_support_format_7zip(a);
	archive_read_support_format_cab(a);
	archive_read_support_format_rar(a);
	archive_read_support_format_iso9660(a);
	/* Seek is really bad, since it forces the read-ahead
	 * logic to discard buffered data. */
	archive_read_support_format_zip(a);

	/* Note: We always return ARCHIVE_OK here, even if some of the
	 * above return ARCHIVE_WARN.  The intent here is to enable
	 * "as much as possible."  Clients who need specific
	 * compression should enable those individually so they can
	 * verify the level of support. */
	/* Clear any warning messages set by the above functions. */
	archive_clear_error(a);
	return (ARCHIVE_OK);
}
/* Return true when the decompressed block in `ib` looks like the start of
 * an (uncompressed) tar archive, i.e. its first header parses cleanly. */
static bool taste_tar(io_block_t *ib)
{
	struct archive_entry *entry;
	struct archive *ar = archive_read_new();
	bool looks_like_tar;

	archive_read_support_compression_none(ar);
	archive_read_support_format_tar(ar);
	archive_read_open_memory(ar, ib->output, ib->outsize);

	looks_like_tar = (archive_read_next_header(ar, &entry) == ARCHIVE_OK);
	archive_read_finish(ar);
	return looks_like_tar;
}
static int extract_pkg_static(int fd, char *p, int sz) { struct archive *a; struct archive_entry *ae; char *end; int ret, r; ret = -1; a = archive_read_new(); if (a == NULL) { warn("archive_read_new"); return (ret); } archive_read_support_compression_all(a); archive_read_support_format_tar(a); if (lseek(fd, 0, 0) == -1) { warn("lseek"); goto cleanup; } if (archive_read_open_fd(a, fd, 4096) != ARCHIVE_OK) { warnx("archive_read_open_fd: %s", archive_error_string(a)); goto cleanup; } ae = NULL; while ((r = archive_read_next_header(a, &ae)) == ARCHIVE_OK) { end = strrchr(archive_entry_pathname(ae), '/'); if (end == NULL) continue; if (strcmp(end, "/pkg-static") == 0) { r = archive_read_extract(a, ae, ARCHIVE_EXTRACT_OWNER | ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_ACL | ARCHIVE_EXTRACT_FFLAGS | ARCHIVE_EXTRACT_XATTR); strlcpy(p, archive_entry_pathname(ae), sz); break; } } if (r == ARCHIVE_OK) ret = 0; else warnx("fail to extract pkg-static"); cleanup: archive_read_finish(a); return (ret); }
// Based on libarchive's public example code. // https://github.com/libarchive/libarchive/wiki/Examples#wiki-Constructing_Objects_On_Disk void GuiZipper::unpackFile(const char *zipFile, const char *outputDir) throw (ZipperException*) { // TODO: use archive_write_disk_open instead (if/when it exists) char cwd[4096]; getcwd(cwd, 4096); char *absZipFile = fileManager_->getAbsFilePath(zipFile); platformstl::filesystem_traits<char> traits; traits.set_current_directory(outputDir); struct archive *a; struct archive *ext; struct archive_entry *entry; int flags; int r; flags = ARCHIVE_EXTRACT_TIME; flags |= ARCHIVE_EXTRACT_PERM; flags |= ARCHIVE_EXTRACT_ACL; flags |= ARCHIVE_EXTRACT_FFLAGS; a = archive_read_new(); archive_read_support_format_tar(a); archive_read_support_filter_gzip(a); ext = archive_write_disk_new(); archive_write_disk_set_options(ext, flags); archive_write_disk_set_standard_lookup(ext); r = archive_read_open_filename(a, absZipFile, 10240); checkForErrors("Error opening archive for reading", a, r); for (;;) { r = archive_read_next_header(a, &entry); if (r == ARCHIVE_EOF) { break; } checkForErrors("Error reading next archive header", a, r); r = archive_write_header(ext, entry); checkForErrors("Error writing next archive header", a, r); copyData(a, ext, outputDir); r = archive_write_finish_entry(ext); checkForErrors("Error writing archive finish entry", a, r); } r = archive_read_close(a); checkForErrors("Error closing read archive", a, r); r = archive_read_free(a); checkForErrors("Error freeing read archive", a, r); r = archive_write_close(ext); checkForErrors("Error closing write archive", a, r); r = archive_write_free(ext); checkForErrors("Error freeing write archive", a, r); traits.set_current_directory(cwd); delete absZipFile; }
/**
 * @brief Creates a new FreeBSD package from a FILE pointer
 * @param fd A pointer to a FILE object containing a FreeBSD Package
 *
 * This creates a pkg object from a given file pointer.
 * It is able to then manipulate the package and install the it to the pkg_db.
 * @todo Write
 * @return A new package object or NULL
 */
struct pkg *
pkg_new_freebsd_from_file(FILE *fd)
{
	struct pkg *pkg;
	struct freebsd_package *fpkg;
	const char *pkg_name;

	if (fd == NULL)
		return NULL;

	/* Create the new package data object */
	fpkg = freebsd_package_new();
	if (fpkg == NULL)
		return NULL;

	fpkg->fd = fd;
	fpkg->pkg_type = fpkg_from_file;
	/* FreeBSD packages are bzip2- or gzip-compressed tar streams. */
	fpkg->archive = archive_read_new();
	archive_read_support_compression_bzip2(fpkg->archive);
	archive_read_support_compression_gzip(fpkg->archive);
	archive_read_support_format_tar(fpkg->archive);
	/* NOTE(review): the return value of archive_read_open_stream() is
	 * not checked; a bad stream only surfaces in the reads below. */
	archive_read_open_stream(fpkg->archive, fd, 10240);

	/*
	 * Get the +CONTENTS file.
	 * We can't use the callbacks as we need the
	 * package name to use with pkg_new
	 */
	freebsd_open_control_files(fpkg);
	assert(fpkg->control != NULL);

	freebsd_parse_contents(fpkg);
	assert(fpkg->contents != NULL);
	/* Line 1 of +CONTENTS is expected to carry the package name. */
	if (fpkg->contents->lines[1].line_type != PKG_LINE_NAME) {
		/** @todo cleanup -- NOTE(review): fpkg (and its open
		 * archive) leaks on this path. */
		return NULL;
	}

	pkg_name = fpkg->contents->lines[1].data;

	pkg = pkg_new(pkg_name, freebsd_get_control_files,
	    freebsd_get_control_file, freebsd_get_deps, freebsd_free);
	if (pkg == NULL) {
		/** @todo cleanup -- NOTE(review): fpkg leaks here as well. */
		return NULL;
	}
	/* Wire up the FreeBSD-specific callbacks; ownership of fpkg passes
	 * to the pkg object via pkg->data. */
	pkg_add_callbacks_data(pkg, freebsd_get_version, freebsd_get_origin);
	pkg_add_callbacks_install(pkg, freebsd_get_next_file,
	    freebsd_run_script);
	pkg->data = fpkg;

	return pkg;
}
int archive_read_support_format_all(struct archive *a) { archive_read_support_format_ar(a); archive_read_support_format_cpio(a); archive_read_support_format_empty(a); archive_read_support_format_iso9660(a); archive_read_support_format_mtree(a); archive_read_support_format_tar(a); archive_read_support_format_xar(a); archive_read_support_format_zip(a); return (ARCHIVE_OK); }
/* Open an inner archive at the current position within the given outer archive. */ static struct archive *open_inner(struct archive *outer) { struct archive *inner; struct inner_data *data; int r; inner = archive_read_new(); if (!inner) { opkg_msg(ERROR, "Failed to create inner archive object.\n"); return NULL; } data = (struct inner_data *)xmalloc(sizeof(struct inner_data)); data->buffer = xmalloc(EXTRACT_BUFFER_LEN); data->outer = outer; /* Inner package is in 'tar' format, gzip compressed. */ r = archive_read_support_filter_gzip(inner); if (r == ARCHIVE_WARN) { /* libarchive returns ARCHIVE_WARN if the filter is provided by * an external program. */ opkg_msg(INFO, "Gzip support provided by external program.\n"); } else if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Gzip format not supported.\n"); goto err_cleanup; } r = archive_read_support_format_tar(inner); if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Tar format not supported: %s\n", archive_error_string(outer)); goto err_cleanup; } r = archive_read_open(inner, data, NULL, inner_read, inner_close); if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Failed to open inner archive: %s\n", archive_error_string(inner)); goto err_cleanup; } return inner; err_cleanup: archive_read_free(inner); free(data->buffer); free(data); return NULL; }
/*
 * Prepare a repo conversion: a libarchive reader over the downloaded repo
 * file (tar container, any compression) and a CPIO/newc writer targeting
 * the temp file "<diskfile>~".  Returns 0 on success or a negative errno;
 * on failure both archive objects are freed.
 */
static int archive_conv_open(struct archive_conv *conv,
                             const struct repo_t *repo) {
  int r;

  /* generally, repo files are gzip compressed, but there's no guarantee of
   * this. in order to be compression-agnostic, use libarchive's reader/writer
   * methods. this also gives us an opportunity to rewrite the archive as CPIO,
   * which is marginally faster given our staunch sequential access. */
  conv->reponame = repo->name;
  /* Build "<diskfile>~" in one pass via chained stpcpy. */
  stpcpy(stpcpy(conv->tmpfile, repo->diskfile), "~");

  conv->in = archive_read_new();
  conv->out = archive_write_new();

  if (conv->in == NULL || conv->out == NULL) {
    fputs("error: failed to allocate memory for archive objects\n", stderr);
    return -ENOMEM;
  }

  archive_read_support_format_tar(conv->in);
  archive_read_support_filter_all(conv->in);
  r = archive_read_open_fd(conv->in, repo->tmpfile.fd, BUFSIZ);
  if (r != ARCHIVE_OK) {
    fprintf(stderr, "error: failed to create archive reader for %s: %s\n",
            repo->name, strerror(archive_errno(conv->in)));
    r = archive_errno(conv->in);
    goto open_error;
  }

  /* Writer: newc cpio, compressed with the configured filter. */
  archive_write_set_format_cpio_newc(conv->out);
  archive_write_add_filter(conv->out, repo->config->compress);
  r = archive_write_open_filename(conv->out, conv->tmpfile);
  if (r != ARCHIVE_OK) {
    fprintf(stderr, "error: failed to open file for writing: %s: %s\n",
            conv->tmpfile, strerror(archive_errno(conv->out)));
    r = archive_errno(conv->out);
    goto open_error;
  }

  return 0;

open_error:
  archive_write_free(conv->out);
  archive_read_free(conv->in);

  return -r;
}
static void extract (int fd) { struct archive *a; struct archive *ext; struct archive_entry *entry; int r; a = archive_read_new (); ext = archive_write_disk_new (); archive_read_support_format_tar (a); archive_read_support_filter_xz (a); archive_read_support_filter_gzip (a); if ((r = archive_read_open_fd (a, fd, 16*1024))) die_with_libarchive (a, "archive_read_open_fd: %s"); while (1) { r = archive_read_next_header (a, &entry); if (r == ARCHIVE_EOF) break; if (r != ARCHIVE_OK) die_with_libarchive (a, "archive_read_next_header: %s"); if (!should_extract (entry)) continue; r = archive_write_header (ext, entry); if (r != ARCHIVE_OK) die_with_libarchive (ext, "archive_write_header: %s"); else { copy_archive (a, ext); r = archive_write_finish_entry (ext); if (r != ARCHIVE_OK) die_with_libarchive (ext, "archive_write_finish_entry: %s"); } } archive_read_close (a); archive_read_free (a); archive_write_close (ext); archive_write_free (ext); }
void archive::applyFormats() { int r2 = archive_read_support_format_tar(m_archive); if (r2 != ARCHIVE_OK) { archive_read_free(m_archive); m_archive = nullptr; throw std::runtime_error("libstriezel::tar::archive::applyFormats(): Format not supported!"); } r2 = archive_read_support_format_gnutar(m_archive); if (r2 != ARCHIVE_OK) { archive_read_free(m_archive); m_archive = nullptr; throw std::runtime_error("libstriezel::tar::archive::applyFormats(): Format not supported!"); } }
/* Build a libarchive read handle over the in-memory bytes held by `data`.
 * Supports zip/rar/7z/tar containers with any decompression filter.
 * Returns NULL (and frees the handle) when the buffer cannot be opened. */
static struct archive *file_type_archive_gen_archive(GBytes *data) {/*{{{*/
	gsize data_size;
	char *data_ptr = (char *)g_bytes_get_data(data, &data_size);
	struct archive *archive = archive_read_new();

	archive_read_support_format_zip(archive);
	archive_read_support_format_rar(archive);
	archive_read_support_format_7zip(archive);
	archive_read_support_format_tar(archive);
	archive_read_support_filter_all(archive);

	if (archive_read_open_memory(archive, data_ptr, data_size) != ARCHIVE_OK) {
		g_printerr("Failed to load archive: %s\n", archive_error_string(archive));
		archive_read_free(archive);
		return NULL;
	}

	return archive;
}/*}}}*/
int archive_read_support_format_by_code(struct archive *a, int format_code) { archive_check_magic(a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_by_code"); switch (format_code & ARCHIVE_FORMAT_BASE_MASK) { case ARCHIVE_FORMAT_7ZIP: return archive_read_support_format_7zip(a); break; case ARCHIVE_FORMAT_AR: return archive_read_support_format_ar(a); break; case ARCHIVE_FORMAT_CAB: return archive_read_support_format_cab(a); break; case ARCHIVE_FORMAT_CPIO: return archive_read_support_format_cpio(a); break; case ARCHIVE_FORMAT_ISO9660: return archive_read_support_format_iso9660(a); break; case ARCHIVE_FORMAT_LHA: return archive_read_support_format_lha(a); break; case ARCHIVE_FORMAT_MTREE: return archive_read_support_format_mtree(a); break; case ARCHIVE_FORMAT_RAR: return archive_read_support_format_rar(a); break; case ARCHIVE_FORMAT_TAR: return archive_read_support_format_tar(a); break; case ARCHIVE_FORMAT_XAR: return archive_read_support_format_xar(a); break; case ARCHIVE_FORMAT_ZIP: return archive_read_support_format_zip(a); break; } return (ARCHIVE_FATAL); }
/*
 * Open the repodata archive already available on repo->fd (a gzipped tar)
 * and internalize its index dictionaries into repo->idx / repo->idxmeta.
 * Returns false on failure; a repodata file whose index cannot be parsed
 * is deleted, since it is unusable.
 */
static bool
repo_open_local(struct xbps_repo *repo, const char *repofile)
{
	struct stat st;
	int rv = 0;

	if (fstat(repo->fd, &st) == -1) {
		rv = errno;
		xbps_dbg_printf(repo->xhp,
		    "[repo] `%s' fstat repodata %s\n",
		    repofile, strerror(rv));
		return false;
	}

	repo->ar = archive_read_new();
	archive_read_support_compression_gzip(repo->ar);
	archive_read_support_format_tar(repo->ar);

	/* Read in chunks of the filesystem's preferred block size. */
	if (archive_read_open_fd(repo->ar, repo->fd,
	    st.st_blksize) == ARCHIVE_FATAL) {
		rv = archive_errno(repo->ar);
		xbps_dbg_printf(repo->xhp,
		    "[repo] `%s' failed to open repodata archive %s\n",
		    repofile, strerror(rv));
		return false;
	}
	/* First dictionary in the archive is the package index. */
	if ((repo->idx = repo_get_dict(repo)) == NULL) {
		rv = archive_errno(repo->ar);
		xbps_dbg_printf(repo->xhp,
		    "[repo] `%s' failed to internalize "
		    " index on archive, removing file.\n", repofile);
		/* broken archive, remove it */
		(void)unlink(repofile);
		return false;
	}
	xbps_dictionary_make_immutable(repo->idx);
	/* Optional second dictionary: its presence marks a signed repo. */
	repo->idxmeta = repo_get_dict(repo);
	if (repo->idxmeta != NULL) {
		repo->is_signed = true;
		xbps_dictionary_make_immutable(repo->idxmeta);
	}

	return true;
}
/*
 * Open the package archive at `path` for reading (tar container, any
 * compression).  Returns a ready libarchive handle, or NULL on failure
 * after reporting the error via pkg_error()/RETURN_P_ERR.
 */
struct archive *
pkg_archive_open(const char *path){
	struct archive *a;

	if(path == NULL)
		RETURN_P_ERR(P_ERR_INVALID_DESCRIPTOR, NULL);

	a = archive_read_new();
	if(a == NULL)
		return NULL;

	archive_read_support_compression_all(a);
	archive_read_support_format_tar(a);

	if(archive_read_open_file(a, path, 10240) != 0){
		pkg_error(0, (char *)archive_error_string(a));
		/* BUGFIX: the handle was previously leaked on this path. */
		archive_read_finish(a);
		return NULL;
	}

	return a;
}
static void test_empty_tarfile(void) { struct archive* a = archive_read_new(); struct archive_entry* e; /* Try opening an empty file with raw and empty handlers. */ assertEqualInt(ARCHIVE_OK, archive_read_support_format_tar(a)); assertEqualInt(0, archive_errno(a)); assertEqualString(NULL, archive_error_string(a)); assertEqualInt(ARCHIVE_OK, archive_read_open_filename(a, "empty.tar", 0)); assertEqualInt(0, archive_errno(a)); assertEqualString(NULL, archive_error_string(a)); assertEqualInt(ARCHIVE_EOF, archive_read_next_header(a, &e)); assertEqualInt(0, archive_errno(a)); assertEqualString(NULL, archive_error_string(a)); archive_read_free(a); }
/* * Read a manually-created archive that has filenames that are * stored in binary instead of UTF-8 and verify that we get * the right filename returned and that we get a warning only * if the header isn't marked as binary. */ static void test_pax_filename_encoding_1(void) { static const char testname[] = "test_pax_filename_encoding.tar"; /* * \314\214 is a valid 2-byte UTF-8 sequence. * \374 is invalid in UTF-8. */ char filename[] = "abc\314\214mno\374xyz"; struct archive *a; struct archive_entry *entry; /* * Read an archive that has non-UTF8 pax filenames in it. */ extract_reference_file(testname); a = archive_read_new(); assertEqualInt(ARCHIVE_OK, archive_read_support_format_tar(a)); assertEqualInt(ARCHIVE_OK, archive_read_support_filter_all(a)); assertEqualInt(ARCHIVE_OK, archive_read_open_filename(a, testname, 10240)); /* * First entry in this test archive has an invalid UTF-8 sequence * in it, but the header is not marked as hdrcharset=BINARY, so that * requires a warning. */ failure("Invalid UTF8 in a pax archive pathname should cause a warning"); assertEqualInt(ARCHIVE_WARN, archive_read_next_header(a, &entry)); assertEqualString(filename, archive_entry_pathname(entry)); /* * Second entry is identical except that it does have * hdrcharset=BINARY, so no warning should be generated. */ failure("A pathname with hdrcharset=BINARY can have invalid UTF8\n" " characters in it without generating a warning"); assertEqualInt(ARCHIVE_OK, archive_read_next_header(a, &entry)); assertEqualString(filename, archive_entry_pathname(entry)); archive_read_free(a); }
/*
 * Returns a pointer to be placed into the data of the Package object.
 * Reads the package stream on `fd`, collects all leading "+"-prefixed
 * control files into f_pkg->control (NULL-terminated), and stashes the
 * first non-control entry in f_pkg->next.  Returns NULL on any failure.
 */
static struct freebsd_package *
freebsd_get_package(FILE *fd)
{
	struct freebsd_package *f_pkg;
	struct pkg_file *file;
	size_t control_size;
	unsigned int control_pos;

	f_pkg = malloc(sizeof(struct freebsd_package));
	if (!f_pkg) {
		return NULL;
	}

	/* Init the struct */
	f_pkg->next = NULL;
	f_pkg->control = NULL;
	f_pkg->contents = NULL;
	f_pkg->fd = fd;

	/* We only need to read from gzip and bzip2 as they
	 * are the only possible file types for FreeBSD packages */
	f_pkg->archive = archive_read_new();
	archive_read_support_compression_bzip2(f_pkg->archive);
	archive_read_support_compression_gzip(f_pkg->archive);
	archive_read_support_format_tar(f_pkg->archive);
	if (archive_read_open_stream(f_pkg->archive, fd, 10240)
	    != ARCHIVE_OK) {
		freebsd_free_package(f_pkg);
		return NULL;
	}

	/* Read the first file and check it has the correct name */
	file = freebsd_get_next_entry(f_pkg->archive);
	if (!file) {
		freebsd_free_package(f_pkg);
		return NULL;
	} else if (strcmp(file->filename, "+CONTENTS")) {
		/* Package error: the first entry must be +CONTENTS */
		pkg_file_free(file);
		freebsd_free_package(f_pkg);
		return NULL;
	}

	/* Set the control files array to be big enough for
	 * the +CONTENTS file and a null terminator */
	f_pkg->contents = pkg_freebsd_contents_new(file->contents);
	control_size = sizeof(struct pkg_file *) * 2;
	/* NOTE(review): this malloc and the realloc below are unchecked;
	 * an allocation failure would dereference NULL here. */
	f_pkg->control = malloc(control_size);
	f_pkg->control[0] = file;
	f_pkg->control[1] = NULL;
	control_pos = 1;

	/* Add all the control files to the control array.  Control files
	 * start with '+'; the first non-control entry is kept in ->next
	 * so the caller can resume the archive scan from it. */
	while (1) {
		file = freebsd_get_next_entry(f_pkg->archive);
		if (file == NULL) {
			break;
		} else if (file->filename[0] != '+') {
			f_pkg->next = file;
			break;
		} else {
			control_size += sizeof(struct pkg_file *);
			f_pkg->control = realloc(f_pkg->control, control_size);
			f_pkg->control[control_pos] = file;
			control_pos++;
			f_pkg->control[control_pos] = NULL;
		}
	}

	return f_pkg;
}
/*
 * Fetch <packagesite>/repo.txz (if newer than the local copy, unless
 * `force`), verify its optional RSA signature against PKG_CONFIG_REPOKEY,
 * sanity-check the contained repo.sqlite against the local ABI, and
 * install it as <dbdir>/<name>.sqlite.  Returns EPKG_OK or an error code.
 */
int
pkg_update(const char *name, const char *packagesite, bool force)
{
	char url[MAXPATHLEN];
	struct archive *a = NULL;
	struct archive_entry *ae = NULL;
	char repofile[MAXPATHLEN];
	char repofile_unchecked[MAXPATHLEN];
	char tmp[MAXPATHLEN];
	const char *dbdir = NULL;
	const char *repokey;
	unsigned char *sig = NULL;
	int siglen = 0;
	int fd, rc = EPKG_FATAL, ret;
	struct stat st;
	time_t t = 0;
	sqlite3 *sqlite;
	char *archreq = NULL;
	const char *myarch;
	int64_t res;
	const char *tmpdir;

	snprintf(url, MAXPATHLEN, "%s/repo.txz", packagesite);

	/* Download target: a mkstemp file under $TMPDIR (default /tmp). */
	tmpdir = getenv("TMPDIR");
	if (tmpdir == NULL)
		tmpdir = "/tmp";
	strlcpy(tmp, tmpdir, sizeof(tmp));
	strlcat(tmp, "/repo.txz.XXXXXX", sizeof(tmp));

	fd = mkstemp(tmp);
	if (fd == -1) {
		pkg_emit_error("Could not create temporary file %s, "
		    "aborting update.\n", tmp);
		return (EPKG_FATAL);
	}

	/* NOTE(review): this early return leaks fd and the temp file --
	 * the cleanup label below is not reached from here. */
	if (pkg_config_string(PKG_CONFIG_DBDIR, &dbdir) != EPKG_OK) {
		pkg_emit_error("Cant get dbdir config entry");
		return (EPKG_FATAL);
	}

	snprintf(repofile, sizeof(repofile), "%s/%s.sqlite", dbdir, name);
	if (force)
		t = 0;		/* Always fetch */
	else {
		if (stat(repofile, &st) != -1) {
			t = st.st_mtime;
			/* add 1 minute to the timestamp because
			 * repo.sqlite is always newer than repo.txz,
			 * 1 minute should be enough. */
			t += 60;
		}
	}

	/* Conditional fetch: only re-download when remote is newer than t. */
	rc = pkg_fetch_file_to_fd(url, fd, t);
	close(fd);
	if (rc != EPKG_OK) {
		goto cleanup;
	}

	if (eaccess(repofile, W_OK) == -1) {
		pkg_emit_error("Insufficient privilege to update %s\n",
		    repofile);
		rc = EPKG_ENOACCESS;
		goto cleanup;
	}

	a = archive_read_new();
	archive_read_support_compression_all(a);
	archive_read_support_format_tar(a);

	archive_read_open_filename(a, tmp, 4096);

	/* Extract repo.sqlite to "<repofile>.unchecked" and slurp the
	 * detached signature (if any) into memory. */
	while (archive_read_next_header(a, &ae) == ARCHIVE_OK) {
		if (strcmp(archive_entry_pathname(ae), "repo.sqlite") == 0) {
			snprintf(repofile_unchecked,
			    sizeof(repofile_unchecked),
			    "%s.unchecked", repofile);
			archive_entry_set_pathname(ae, repofile_unchecked);
			/*
			 * The repo should be owned by root and not writable
			 */
			archive_entry_set_uid(ae, 0);
			archive_entry_set_gid(ae, 0);
			archive_entry_set_perm(ae, 0644);
			archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS);
		}
		if (strcmp(archive_entry_pathname(ae), "signature") == 0) {
			siglen = archive_entry_size(ae);
			sig = malloc(siglen);
			archive_read_data(a, sig, siglen);
		}
	}

	if (pkg_config_string(PKG_CONFIG_REPOKEY, &repokey) != EPKG_OK) {
		free(sig);
		/* NOTE(review): returns without the cleanup label, leaking
		 * the archive handle and the temp file. */
		return (EPKG_FATAL);
	}

	if (repokey != NULL) {
		if (sig != NULL) {
			ret = rsa_verify(repofile_unchecked, repokey,
			    sig, siglen - 1);
			if (ret != EPKG_OK) {
				pkg_emit_error("Invalid signature, "
				    "removing repository.\n");
				unlink(repofile_unchecked);
				free(sig);
				rc = EPKG_FATAL;
				goto cleanup;
			}
			free(sig);
		} else {
			pkg_emit_error("No signature found in the repository. "
			    "Can not validate against %s key.", repokey);
			rc = EPKG_FATAL;
			unlink(repofile_unchecked);
			goto cleanup;
		}
	}
	/* NOTE(review): when repokey == NULL but a signature was read,
	 * `sig` is leaked on the paths below. */

	/* check is the repository is for valid architecture */
	sqlite3_initialize();

	if (sqlite3_open(repofile_unchecked, &sqlite) != SQLITE_OK) {
		unlink(repofile_unchecked);
		pkg_emit_error("Corrupted repository");
		rc = EPKG_FATAL;
		goto cleanup;
	}

	pkg_config_string(PKG_CONFIG_ABI, &myarch);
	archreq = sqlite3_mprintf("select count(arch) from packages "
	    "where arch not GLOB '%q'", myarch);
	if (get_pragma(sqlite, archreq, &res) != EPKG_OK) {
		sqlite3_free(archreq);
		pkg_emit_error("Unable to query repository");
		rc = EPKG_FATAL;
		sqlite3_close(sqlite);
		goto cleanup;
	}
	if (res > 0) {
		pkg_emit_error("At least one of the packages provided by"
		    "the repository is not compatible with your abi: %s",
		    myarch);
		rc = EPKG_FATAL;
		sqlite3_close(sqlite);
		goto cleanup;
	}

	sqlite3_close(sqlite);
	sqlite3_shutdown();

	/* Verified and ABI-compatible: atomically install the new db. */
	if (rename(repofile_unchecked, repofile) != 0) {
		pkg_emit_errno("rename", "");
		rc = EPKG_FATAL;
		goto cleanup;
	}

	if ((rc = remote_add_indexes(name)) != EPKG_OK)
		goto cleanup;
	rc = EPKG_OK;

cleanup:
	if (a != NULL)
		archive_read_finish(a);
	(void)unlink(tmp);

	return (rc);
}
/*
 * Same as 'c', except we only support tar or empty formats in
 * uncompressed files on disk.
 *
 * Append mode: scan the existing archive on disk to find where its
 * end-of-archive marker sits and what format it uses, then re-open the
 * file for writing at that offset and append the requested entries.
 */
void
tar_mode_r(struct bsdtar *bsdtar)
{
	int64_t end_offset;
	int format;
	struct archive *a;
	struct archive_entry *entry;
	int r;

	/* Sanity-test some arguments and the file. */
	test_for_append(bsdtar);

	/* Default format if the file turns out to be empty. */
	format = ARCHIVE_FORMAT_TAR_PAX_RESTRICTED;

#if defined(__BORLANDC__)
	bsdtar->fd = open(bsdtar->filename, O_RDWR | O_CREAT | O_BINARY);
#else
	bsdtar->fd = open(bsdtar->filename, O_RDWR | O_CREAT | O_BINARY, 0666);
#endif
	if (bsdtar->fd < 0)
		lafe_errc(1, errno,
		    "Cannot open %s", bsdtar->filename);

	a = archive_read_new();
	archive_read_support_filter_all(a);
	archive_read_support_format_empty(a);
	archive_read_support_format_tar(a);
	archive_read_support_format_gnutar(a);
	set_reader_options(bsdtar, a);
	r = archive_read_open_fd(a, bsdtar->fd, 10240);
	if (r != ARCHIVE_OK)
		lafe_errc(1, archive_errno(a),
		    "Can't read archive %s: %s", bsdtar->filename,
		    archive_error_string(a));
	/* Walk all entries: appending is only possible to an uncompressed
	 * archive, and we remember the format of the last entry seen. */
	while (0 == archive_read_next_header(a, &entry)) {
		if (archive_filter_code(a, 0) != ARCHIVE_FILTER_NONE) {
			archive_read_free(a);
			close(bsdtar->fd);
			lafe_errc(1, 0,
			    "Cannot append to compressed archive.");
		}
		/* Keep going until we hit end-of-archive */
		format = archive_format(a);
	}

	/* Offset just past the last entry: new data gets written here. */
	end_offset = archive_read_header_position(a);
	archive_read_free(a);

	/* Re-open archive for writing */
	a = archive_write_new();

	/*
	 * Set the format to be used for writing.  To allow people to
	 * extend empty files, we need to allow them to specify the format,
	 * which opens the possibility that they will specify a format that
	 * doesn't match the existing format.  Hence, the following bit
	 * of arcane ugliness.
	 */

	if (cset_get_format(bsdtar->cset) != NULL) {
		/* If the user requested a format, use that, but ... */
		archive_write_set_format_by_name(a,
		    cset_get_format(bsdtar->cset));
		/* ... complain if it's not compatible. */
		format &= ARCHIVE_FORMAT_BASE_MASK;
		if (format != (int)(archive_format(a) & ARCHIVE_FORMAT_BASE_MASK)
		    && format != ARCHIVE_FORMAT_EMPTY) {
			lafe_errc(1, 0,
			    "Format %s is incompatible with the archive %s.",
			    cset_get_format(bsdtar->cset), bsdtar->filename);
		}
	} else {
		/*
		 * Just preserve the current format, with a little care
		 * for formats that libarchive can't write.
		 */
		if (format == ARCHIVE_FORMAT_EMPTY)
			format = ARCHIVE_FORMAT_TAR_PAX_RESTRICTED;
		archive_write_set_format(a, format);
	}

	/* Position the fd where the old end-of-archive marker began. */
	if (lseek(bsdtar->fd, end_offset, SEEK_SET) < 0)
		lafe_errc(1, errno, "Could not seek to archive end");
	set_writer_options(bsdtar, a);
	if (ARCHIVE_OK != archive_write_open_fd(a, bsdtar->fd))
		lafe_errc(1, 0, "%s", archive_error_string(a));

	write_archive(a, bsdtar); /* XXX check return val XXX */

	close(bsdtar->fd);
	bsdtar->fd = -1;
}
/*
 * Build (or update) a package repository database under 'path'.
 *
 * Walks the directory tree with worker threads that parse package
 * files, and inserts each result into repo.sqlite via prepared
 * statements.  If an existing repo.txz is present, its repo.sqlite
 * is extracted first so unchanged packages can be skipped.
 *
 * 'progress' (may be NULL) is invoked with each newly-added package
 * and the caller's opaque 'data'.
 *
 * Returns EPKG_OK on success, EPKG_FATAL on error.
 */
int
pkg_create_repo(char *path, bool force, void (progress)(struct pkg *pkg, void *data), void *data)
{
	FTS *fts = NULL;
	struct thd_data thd_data;
	int num_workers;
	size_t len;
	pthread_t *tids = NULL;

	struct pkg_dep *dep = NULL;
	struct pkg_category *category = NULL;
	struct pkg_license *license = NULL;
	struct pkg_option *option = NULL;
	struct pkg_shlib *shlib = NULL;

	sqlite3 *sqlite = NULL;

	int64_t package_id;
	char *errmsg = NULL;
	int retcode = EPKG_OK;
	int ret;

	char *repopath[2];
	char repodb[MAXPATHLEN + 1];
	char repopack[MAXPATHLEN + 1];

	struct archive *a = NULL;
	struct archive_entry *ae = NULL;

	if (!is_dir(path)) {
		pkg_emit_error("%s is not a directory", path);
		return (EPKG_FATAL);
	}

	repopath[0] = path;
	repopath[1] = NULL;

	/* One worker per CPU; fall back to 6 if hw.ncpu is unavailable. */
	len = sizeof(num_workers);
	if (sysctlbyname("hw.ncpu", &num_workers, &len, NULL, 0) == -1)
		num_workers = 6;

	if ((fts = fts_open(repopath, FTS_PHYSICAL|FTS_NOCHDIR, NULL)) == NULL) {
		pkg_emit_errno("fts_open", path);
		retcode = EPKG_FATAL;
		goto cleanup;
	}

	snprintf(repodb, sizeof(repodb), "%s/repo.sqlite", path);
	snprintf(repopack, sizeof(repopack), "%s/repo.txz", path);

	/*
	 * If a packed repo already exists, pull its repo.sqlite out so we
	 * can reuse it (checksum matches below avoid re-adding packages).
	 */
	if (access(repopack, F_OK) == 0) {
		a = archive_read_new();
		archive_read_support_compression_all(a);
		archive_read_support_format_tar(a);

		ret = archive_read_open_filename(a, repopack, 4096);
		if (ret != ARCHIVE_OK) {
			/* if we can't unpack it it won't be useful for us */
			unlink(repopack);
		} else {
			while (archive_read_next_header(a, &ae) == ARCHIVE_OK) {
				if (!strcmp(archive_entry_pathname(ae),
				    "repo.sqlite")) {
					archive_entry_set_pathname(ae, repodb);
					archive_read_extract(a, ae,
					    EXTRACT_ARCHIVE_FLAGS);
					break;
				}
			}
		}
		if (a != NULL)
			archive_read_finish(a);
	}

	if ((retcode = initialize_repo(repodb, force, &sqlite)) != EPKG_OK)
		goto cleanup;
	if ((retcode = initialize_prepared_statements(sqlite)) != EPKG_OK)
		goto cleanup;

	thd_data.root_path = path;
	thd_data.max_results = num_workers;
	thd_data.num_results = 0;
	thd_data.stop = false;
	thd_data.fts = fts;
	pthread_mutex_init(&thd_data.fts_m, NULL);
	STAILQ_INIT(&thd_data.results);
	thd_data.thd_finished = 0;
	pthread_mutex_init(&thd_data.results_m, NULL);
	pthread_cond_init(&thd_data.has_result, NULL);
	pthread_cond_init(&thd_data.has_room, NULL);

	/* Launch workers */
	tids = calloc(num_workers, sizeof(pthread_t));
	if (tids == NULL) {
		/* Fix: calloc result was previously used unchecked. */
		retcode = EPKG_FATAL;
		goto cleanup;
	}
	for (int i = 0; i < num_workers; i++) {
		pthread_create(&tids[i], NULL, (void *)&read_pkg_file, &thd_data);
	}

	/* Consume worker results until all workers have finished. */
	for (;;) {
		struct pkg_result *r;

		const char *name, *version, *origin, *comment, *desc;
		const char *arch, *maintainer, *www, *prefix;
		int64_t flatsize;
		lic_t licenselogic;

		pthread_mutex_lock(&thd_data.results_m);
		while ((r = STAILQ_FIRST(&thd_data.results)) == NULL) {
			if (thd_data.thd_finished == num_workers) {
				break;
			}
			pthread_cond_wait(&thd_data.has_result,
			    &thd_data.results_m);
		}
		if (r != NULL) {
			STAILQ_REMOVE_HEAD(&thd_data.results, next);
			thd_data.num_results--;
			/* Wake a producer blocked on a full result queue. */
			pthread_cond_signal(&thd_data.has_room);
		}
		pthread_mutex_unlock(&thd_data.results_m);
		if (r == NULL) {
			break;	/* all workers done, queue drained */
		}

		if (r->retcode != EPKG_OK) {
			/*
			 * Fix: the failed result was previously leaked.
			 * Assumes the worker always sets r->pkg (possibly
			 * NULL; pkg_free is NULL-safe) — TODO confirm
			 * against read_pkg_file.
			 */
			pkg_free(r->pkg);
			free(r);
			continue;
		}

		/* do not add if package if already in repodb
		   (possibly at a different pkg_path) */
		if (run_prepared_statement(EXISTS, r->cksum) != SQLITE_ROW) {
			ERROR_SQLITE(sqlite);
			/* Fix: previously returned EPKG_OK on this error. */
			retcode = EPKG_FATAL;
			goto fail_result;
		}
		if (sqlite3_column_int(STMT(EXISTS), 0) > 0) {
			pkg_free(r->pkg);
			free(r);
			continue;
		}

		if (progress != NULL)
			progress(r->pkg, data);

		pkg_get(r->pkg, PKG_ORIGIN, &origin, PKG_NAME, &name,
		    PKG_VERSION, &version, PKG_COMMENT, &comment,
		    PKG_DESC, &desc, PKG_ARCH, &arch,
		    PKG_MAINTAINER, &maintainer, PKG_WWW, &www,
		    PKG_PREFIX, &prefix, PKG_FLATSIZE, &flatsize,
		    PKG_LICENSE_LOGIC, &licenselogic);

try_again:
		if ((ret = run_prepared_statement(PKG, origin, name, version,
		    comment, desc, arch, maintainer, www, prefix, r->size,
		    flatsize, (int64_t)licenselogic, r->cksum,
		    r->path)) != SQLITE_DONE) {
			if (ret == SQLITE_CONSTRAINT) {
				switch (maybe_delete_conflicting(origin,
				    version, r->path)) {
				case EPKG_FATAL: /* sqlite error */
					ERROR_SQLITE(sqlite);
					retcode = EPKG_FATAL;
					goto fail_result;
				case EPKG_END: /* repo already has newer */
					pkg_free(r->pkg);
					free(r);
					continue;
				default: /* conflict cleared, try again */
					goto try_again;
				}
			} else {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		package_id = sqlite3_last_insert_rowid(sqlite);

		dep = NULL;
		while (pkg_deps(r->pkg, &dep) == EPKG_OK) {
			if (run_prepared_statement(DEPS, pkg_dep_origin(dep),
			    pkg_dep_name(dep), pkg_dep_version(dep),
			    package_id) != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		category = NULL;
		while (pkg_categories(r->pkg, &category) == EPKG_OK) {
			const char *cat_name = pkg_category_name(category);

			/* CAT1 inserts the category, CAT2 links it. */
			ret = run_prepared_statement(CAT1, cat_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(CAT2,
				    package_id, cat_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		license = NULL;
		while (pkg_licenses(r->pkg, &license) == EPKG_OK) {
			const char *lic_name = pkg_license_name(license);

			ret = run_prepared_statement(LIC1, lic_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(LIC2,
				    package_id, lic_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		option = NULL;
		while (pkg_options(r->pkg, &option) == EPKG_OK) {
			if (run_prepared_statement(OPTS, pkg_option_opt(option),
			    pkg_option_value(option),
			    package_id) != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		shlib = NULL;
		while (pkg_shlibs(r->pkg, &shlib) == EPKG_OK) {
			const char *shlib_name = pkg_shlib_name(shlib);

			ret = run_prepared_statement(SHLIB1, shlib_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(SHLIB2,
				    package_id, shlib_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto fail_result;
			}
		}

		/* Normal end of iteration: release the consumed result. */
		pkg_free(r->pkg);
		free(r);
		continue;

fail_result:
		/*
		 * Fix: error exits used to jump straight to cleanup and
		 * leak the in-flight result.
		 */
		pkg_free(r->pkg);
		free(r);
		goto cleanup;
	}

	if (pkgdb_transaction_commit(sqlite, NULL) != SQLITE_OK)
		retcode = EPKG_FATAL;

cleanup:
	if (tids != NULL) {
		/* Cancel running threads */
		if (retcode != EPKG_OK) {
			pthread_mutex_lock(&thd_data.fts_m);
			thd_data.stop = true;
			pthread_mutex_unlock(&thd_data.fts_m);
		}
		/* Join on threads to release thread IDs */
		for (int i = 0; i < num_workers; i++) {
			pthread_join(tids[i], NULL);
		}
		free(tids);
	}

	if (fts != NULL)
		fts_close(fts);

	finalize_prepared_statements();

	if (sqlite != NULL)
		sqlite3_close(sqlite);

	if (errmsg != NULL)
		sqlite3_free(errmsg);

	sqlite3_shutdown();

	return (retcode);
}
static void extract_archive(const char *filename, const char *output) { struct archive *a; struct archive *ext; struct archive_entry *entry; int flags; int r; flags = ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_SECURE_NODOTDOT; a = archive_read_new(); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_support_compression_bzip2(a); #else archive_read_support_filter_bzip2(a); #endif archive_read_support_format_tar(a); ext = archive_write_disk_new(); archive_write_disk_set_options(ext, flags); archive_write_disk_set_standard_lookup(ext); r = archive_read_open_file(a, filename, 10240); if (r) { std::string msg = extract_archive_error(filename, output, a); archive_read_close(a); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_finish(a); #else archive_read_free(a); #endif throw utils::InternalError(msg); } for (;;) { r = archive_read_next_header(a, &entry); if (r == ARCHIVE_EOF) { break; } if (r != ARCHIVE_OK) { std::string msg = extract_archive_error(filename, output, a); archive_read_close(a); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_finish(a); #else archive_read_free(a); #endif archive_write_close(ext); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_write_finish(ext); #else archive_write_free(ext); #endif throw utils::InternalError(msg); } if (r < ARCHIVE_WARN) { break; } r = archive_write_header(ext, entry); if (r != ARCHIVE_OK) { std::string msg = extract_archive_error(filename, output, a); archive_read_close(a); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_finish(a); #else archive_read_free(a); #endif archive_write_close(ext); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_write_finish(ext); #else archive_write_free(ext); #endif throw utils::InternalError(msg); } else { r = copy_data(a, ext); if (r != ARCHIVE_OK) { std::string msg = extract_archive_error(filename, output, a); archive_read_close(a); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_finish(a); #else archive_read_free(a); #endif archive_write_close(ext); #if ARCHIVE_VERSION_NUMBER < 4000000 
archive_write_finish(ext); #else archive_write_free(ext); #endif throw utils::InternalError(msg); } } } archive_read_close(a); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_read_finish(a); #else archive_read_free(a); #endif archive_write_close(ext); #if ARCHIVE_VERSION_NUMBER < 4000000 archive_write_finish(ext); #else archive_write_free(ext); #endif }
/*
 * Update mode ('u'): append only those entries that are newer than the
 * copies already in the archive.  Works like 'r' mode — scan the
 * existing archive, then reopen the fd for writing at the old
 * end-of-archive marker — but additionally records each seen entry's
 * mtime as an exclusion so write_archive() skips files that are not
 * strictly newer.
 */
void
tar_mode_u(struct bsdtar *bsdtar)
{
	int64_t end_offset;	/* offset of the existing end-of-archive marker */
	struct archive *a;
	struct archive_entry *entry;
	int format;		/* format detected while scanning the archive */
	struct archive_dir_entry *p;
	struct archive_dir archive_dir;

	/* Per-run directory list; freed at the bottom of this function. */
	bsdtar->archive_dir = &archive_dir;
	memset(&archive_dir, 0, sizeof(archive_dir));

	/* Default used if the archive yields no format determination. */
	format = ARCHIVE_FORMAT_TAR_PAX_RESTRICTED;

	/* Sanity-test some arguments and the file. */
	test_for_append(bsdtar);

	bsdtar->fd = open(bsdtar->filename, O_RDWR | O_BINARY);
	if (bsdtar->fd < 0)
		lafe_errc(1, errno, "Cannot open %s", bsdtar->filename);

	a = archive_read_new();
	archive_read_support_filter_all(a);
	archive_read_support_format_tar(a);
	archive_read_support_format_gnutar(a);
	set_reader_options(bsdtar, a);
	if (archive_read_open_fd(a, bsdtar->fd, bsdtar->bytes_per_block)
	    != ARCHIVE_OK) {
		lafe_errc(1, 0, "Can't open %s: %s", bsdtar->filename,
		    archive_error_string(a));
	}

	/* Build a list of all entries and their recorded mod times. */
	while (0 == archive_read_next_header(a, &entry)) {
		/* We can only append in place to an uncompressed archive. */
		if (archive_filter_code(a, 0) != ARCHIVE_FILTER_NONE) {
			archive_read_free(a);
			close(bsdtar->fd);
			lafe_errc(1, 0, "Cannot append to compressed archive.");
		}
		/*
		 * Register an mtime-based exclusion so files that are not
		 * newer than this archived copy are skipped when writing.
		 */
		if (archive_match_exclude_entry(bsdtar->matching,
		    ARCHIVE_MATCH_MTIME | ARCHIVE_MATCH_OLDER |
		    ARCHIVE_MATCH_EQUAL, entry) != ARCHIVE_OK)
			lafe_errc(1, 0, "Error : %s",
			    archive_error_string(bsdtar->matching));
		/* Record the last format determination we see */
		format = archive_format(a);
		/* Keep going until we hit end-of-archive */
	}

	end_offset = archive_read_header_position(a);
	archive_read_free(a);

	/* Re-open archive for writing. */
	a = archive_write_new();
	/*
	 * Set format to same one auto-detected above.
	 */
	archive_write_set_format(a, format);
	archive_write_set_bytes_per_block(a, bsdtar->bytes_per_block);
	archive_write_set_bytes_in_last_block(a, bsdtar->bytes_in_last_block);

	/* Position the fd so new data overwrites the old end-of-archive. */
	if (lseek(bsdtar->fd, end_offset, SEEK_SET) < 0)
		lafe_errc(1, errno, "Could not seek to archive end");
	set_writer_options(bsdtar, a);
	if (ARCHIVE_OK != archive_write_open_fd(a, bsdtar->fd))
		lafe_errc(1, 0, "%s", archive_error_string(a));

	write_archive(a, bsdtar);

	close(bsdtar->fd);
	bsdtar->fd = -1;

	/* Release the per-run directory entry list built above. */
	while (bsdtar->archive_dir->head != NULL) {
		p = bsdtar->archive_dir->head->next;
		free(bsdtar->archive_dir->head->name);
		free(bsdtar->archive_dir->head);
		bsdtar->archive_dir->head = p;
	}
	bsdtar->archive_dir->tail = NULL;
}