/*
 * Verify that test_read_extract correctly works with
 * Zip entries that use length-at-end.
 *
 * The caller hands us an opened read archive; we consume its single
 * "hello.txt" entry, extract it (when zlib is available), and close
 * and free the archive ourselves.
 *
 * seek_checks: non-zero when the archive was opened from a seekable
 * source, in which case the reader can learn the size/mode up front;
 * for a streaming source the size is unknown until the data is read.
 */
static void
verify_extract_length_at_end(struct archive *a, int seek_checks)
{
	struct archive_entry *ae;

	assertEqualIntA(a, ARCHIVE_OK, archive_read_next_header(a, &ae));

	/* Entry must not be flagged as encrypted. */
	assertEqualInt(archive_entry_is_encrypted(ae), 0);
	assertEqualIntA(a, archive_read_has_encrypted_entries(a), 0);
	assertEqualString("hello.txt", archive_entry_pathname(ae));

	if (seek_checks) {
		/* Seekable input: central directory gives mode and size. */
		assertEqualInt(AE_IFREG | 0644, archive_entry_mode(ae));
		assert(archive_entry_size_is_set(ae));
		assertEqualInt(6, archive_entry_size(ae));
	} else {
		/* Streaming input: length-at-end means size is unknown here. */
		assert(!archive_entry_size_is_set(ae));
		assertEqualInt(0, archive_entry_size(ae));
	}

	if (archive_zlib_version() != NULL) {
		/* Deflate supported: extraction must succeed with exact content. */
		assertEqualIntA(a, ARCHIVE_OK, archive_read_extract(a, ae, 0));
		assertFileContents("hello\x0A", 6, "hello.txt");
	} else {
		/* No zlib: extraction must fail with the canonical error. */
		assertEqualIntA(a, ARCHIVE_FAILED, archive_read_extract(a, ae, 0));
		assertEqualString(archive_error_string(a),
		    "Unsupported ZIP compression method (deflation)");
		assert(archive_errno(a) != 0);
	}

	assertEqualIntA(a, ARCHIVE_OK, archive_read_close(a));
	assertEqualIntA(a, ARCHIVE_OK, archive_read_free(a));
}
/** * Extract the given @archive into the current working directory. * This function returns -1 if an error occured, otherwise 0. */ static int extract_archive(struct archive *archive) { struct archive_entry *entry; int result = 0; int status; int flags = ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_ACL | ARCHIVE_EXTRACT_FFLAGS | ARCHIVE_EXTRACT_XATTR; /* Avoid spurious warnings. One should test for the CAP_CHOWN * capability instead but libarchive only does this test: */ if (geteuid() == 0) flags |= ARCHIVE_EXTRACT_OWNER; while (archive_read_next_header(archive, &entry) == ARCHIVE_OK) { status = archive_read_extract(archive, entry, flags); switch (status) { case ARCHIVE_OK: note(NULL, INFO, USER, "extracted: %s", archive_entry_pathname(entry)); break; default: result = -1; note(NULL, ERROR, INTERNAL, "%s: %s", archive_error_string(archive), strerror(archive_errno(archive))); break; } } return result; }
/*
 * Extract every entry of an already-opened package archive.
 *
 * Assumes the caller has already read the first header into `ae`;
 * subsequent headers are pulled by the do/while condition.
 * Returns EPKG_OK on success, EPKG_FATAL on any extraction error.
 */
static int
do_extract(struct archive *a, struct archive_entry *ae)
{
	int retcode = EPKG_OK;
	int ret = 0;
	char path[MAXPATHLEN];
	struct stat st;

	do {
		const char *pathname = archive_entry_pathname(ae);

		ret = archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS);
		if (ret != ARCHIVE_OK) {
			/*
			 * show error except when the failure is during
			 * extracting a directory and that the directory already
			 * exists.
			 * this allow to install packages linux_base from
			 * package for example
			 */
			if (archive_entry_filetype(ae) != AE_IFDIR ||
			    !is_dir(pathname)) {
				pkg_emit_error("archive_read_extract(): %s",
				    archive_error_string(a));
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		/*
		 * if the file is a configuration file and the configuration
		 * file does not already exist on the file system, then
		 * extract it
		 * ex: conf1.cfg.pkgconf:
		 * if conf1.cfg doesn't exists create it based on
		 * conf1.cfg.pkgconf
		 */
		if (is_conf_file(pathname, path, sizeof(path))
		    && lstat(path, &st) == -1 && errno == ENOENT) {
			/* Re-extract the same entry under its real name. */
			archive_entry_set_pathname(ae, path);
			ret = archive_read_extract(a,ae, EXTRACT_ARCHIVE_FLAGS);
			if (ret != ARCHIVE_OK) {
				pkg_emit_error("archive_read_extract(): %s",
				    archive_error_string(a));
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}
	} while ((ret = archive_read_next_header(a, &ae)) == ARCHIVE_OK);

	/* Loop only ends cleanly at end-of-archive. */
	if (ret != ARCHIVE_EOF) {
		pkg_emit_error("archive_read_next_header(): %s",
		    archive_error_string(a));
		retcode = EPKG_FATAL;
	}

cleanup:
	return (retcode);
}
/* Extract a single archive entry to `filename`.
 * Returns 0 on success (including recoverable warnings that are not
 * out-of-disk), 1 on failure. */
static int perform_extraction(alpm_handle_t *handle, struct archive *archive,
		struct archive_entry *entry, const char *filename)
{
	static const int archive_flags =
		ARCHIVE_EXTRACT_OWNER |
		ARCHIVE_EXTRACT_PERM |
		ARCHIVE_EXTRACT_TIME |
		ARCHIVE_EXTRACT_UNLINK |
		ARCHIVE_EXTRACT_SECURE_SYMLINKS;
	int rc;

	archive_entry_set_pathname(entry, filename);

	rc = archive_read_extract(archive, entry, archive_flags);
	if (rc == ARCHIVE_OK) {
		return 0;
	}

	if (rc == ARCHIVE_WARN && archive_errno(archive) != ENOSPC) {
		/* operation succeeded but a "non-critical" error was encountered */
		_alpm_log(handle, ALPM_LOG_WARNING,
				_("warning given when extracting %s (%s)\n"),
				filename, archive_error_string(archive));
		return 0;
	}

	/* Hard failure (or a warning caused by a full disk). */
	_alpm_log(handle, ALPM_LOG_ERROR,
			_("could not extract %s (%s)\n"),
			filename, archive_error_string(archive));
	alpm_logaction(handle, ALPM_CALLER_PREFIX,
			"error: could not extract %s (%s)\n",
			filename, archive_error_string(archive));
	return 1;
}
/* Reader#extract(entry, flags = 0) -> nil
 *
 * Extracts the given entry from the archive currently being read.
 * Raises Archive::Error if the reader is at EOF or extraction fails.
 */
static VALUE rb_libarchive_reader_extract(int argc, VALUE *argv, VALUE self) {
  VALUE v_entry, v_flags;
  struct rb_libarchive_archive_container *reader;
  struct rb_libarchive_entry_container *entry_holder;
  int flags;

  /* One mandatory argument (entry), one optional (flags bitmask). */
  rb_scan_args(argc, argv, "11", &v_entry, &v_flags);
  Check_Class(v_entry, rb_cArchiveEntry);
  flags = NIL_P(v_flags) ? 0 : (NUM2INT(v_flags) & EXTRACT_FLAGS);

  Data_Get_Struct(self, struct rb_libarchive_archive_container, reader);
  Check_Archive(reader);
  if (reader->eof) {
    rb_raise(rb_eArchiveError, "Extract archive failed: It has already reached EOF");
  }

  Data_Get_Struct(v_entry, struct rb_libarchive_entry_container, entry_holder);
  Check_Entry(entry_holder);

  if (archive_read_extract(reader->ar, entry_holder->ae, flags) != ARCHIVE_OK) {
    rb_raise(rb_eArchiveError, "Extract archive failed: %s", archive_error_string(reader->ar));
  }

  return Qnil;
}
static void pack_extract(const char *pack, const char *dbname, const char *dbpath) { struct archive *a = NULL; struct archive_entry *ae = NULL; if (access(pack, F_OK) != 0) return; a = archive_read_new(); archive_read_support_filter_all(a); archive_read_support_format_tar(a); if (archive_read_open_filename(a, pack, 4096) != ARCHIVE_OK) { /* if we can't unpack it it won't be useful for us */ unlink(pack); archive_read_free(a); return; } while (archive_read_next_header(a, &ae) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(ae), dbname) == 0) { archive_entry_set_pathname(ae, dbpath); archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS); break; } } archive_read_free(a); }
static int extract_pkg_static(int fd, char *p, int sz) { struct archive *a; struct archive_entry *ae; char *end; int ret, r; ret = -1; a = archive_read_new(); if (a == NULL) { warn("archive_read_new"); return (ret); } archive_read_support_compression_all(a); archive_read_support_format_tar(a); if (lseek(fd, 0, 0) == -1) { warn("lseek"); goto cleanup; } if (archive_read_open_fd(a, fd, 4096) != ARCHIVE_OK) { warnx("archive_read_open_fd: %s", archive_error_string(a)); goto cleanup; } ae = NULL; while ((r = archive_read_next_header(a, &ae)) == ARCHIVE_OK) { end = strrchr(archive_entry_pathname(ae), '/'); if (end == NULL) continue; if (strcmp(end, "/pkg-static") == 0) { r = archive_read_extract(a, ae, ARCHIVE_EXTRACT_OWNER | ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_ACL | ARCHIVE_EXTRACT_FFLAGS | ARCHIVE_EXTRACT_XATTR); strlcpy(p, archive_entry_pathname(ae), sz); break; } } if (r == ARCHIVE_OK) ret = 0; else warnx("fail to extract pkg-static"); cleanup: archive_read_finish(a); return (ret); }
/*
 * Extract all of the archive members of the specified archive
 * object. If dest is not NULL, extract archive members to that
 * directory. If dest is not NULL and does not exist as a directory,
 * create it first. Return ARCHIVE_OK on success, ARCHIVE_* otherwise.
 *
 * The archive object is always finished (freed) before returning, and
 * the previous working directory is always restored — previously both
 * were skipped on error paths, leaking the archive and leaving the
 * process chdir'd into dest.
 */
int unpack_members_and_finish(struct archive *a, char *dest,
                              filterfunc filter, void* userptr) {
    int rc = ARCHIVE_OK;
    int restore = 0;
    char prevcwd[PATH_MAX];
    struct archive_entry *e = NULL;

    if (getcwd(prevcwd, PATH_MAX) == NULL) {
        logMessage(ERROR, "unable to getcwd() (%s:%d): %m", __func__, __LINE__);
        rc = ARCHIVE_FATAL;
        goto finish;
    } else {
        restore = 1;
    }

    if (dest != NULL) {
        if (unpack_mkpath(dest) != ARCHIVE_OK) {
            /* unpack_mkpath already logged the failure */
            rc = ARCHIVE_FATAL;
            goto finish;
        } else if (chdir(dest) == -1) {
            logMessage(ERROR, "unable to chdir %s (%s:%d): %m",
                       dest, __func__, __LINE__);
            rc = ARCHIVE_FATAL;
            goto finish;
        }
    }

    while (archive_read_next_header(a, &e) == ARCHIVE_OK) {
        const char *pathname = archive_entry_pathname(e);
        const struct stat *fstat = archive_entry_stat(e);

        /* Caller-supplied filter: non-zero means skip this member. */
        if (filter && filter(pathname, fstat, userptr))
            continue;

        if (archive_read_extract(a, e, 0) != ARCHIVE_OK) {
            logMessage(ERROR, "error unpacking %s (%s:%d): %s",
                       pathname, __func__, __LINE__, archive_error_string(a));
            rc = ARCHIVE_FATAL;
            break;
        }
    }

finish:
    /* Restore the original working directory even on failure.
     * (Fix: the message now reports prevcwd, the directory actually
     * being changed to, rather than dest.) */
    if (restore && chdir(prevcwd) == -1) {
        logMessage(ERROR, "unable to chdir %s (%s:%d): %m",
                   prevcwd, __func__, __LINE__);
        rc = ARCHIVE_FATAL;
    }

    /* Per the function's contract, always finish the archive. */
    if (archive_read_finish(a) != ARCHIVE_OK) {
        logMessage(ERROR, "error closing archive (%s:%d): %s",
                   __func__, __LINE__, archive_error_string(a));
        rc = ARCHIVE_FATAL;
    }

    return rc;
}
/* ArchiveReader::extractCurrentEntry {{{
 *
 * PHP method: extracts the archive's current entry to disk.
 * Optional integer argument: libarchive ARCHIVE_EXTRACT_* flags.
 * Returns true on success, false if the entry was already consumed,
 * NULL (with a warning/exception) on error.
 */
ZEND_METHOD(ArchiveReader, extractCurrentEntry)
{
	zval *this = getThis();
	archive_file_t *arch;
	int result, error_num, flags = 0;
	const char *error_string;
	zend_error_handling error_handling;

	/* Convert PHP warnings into ArchiveException for the duration of
	 * this call; every return path must restore the handler first. */
	zend_replace_error_handling(EH_THROW, ce_ArchiveException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (!_archive_get_fd(this, &arch TSRMLS_CC)) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (arch->current_entry == NULL) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Current archive entry is not available");
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (arch->current_entry->data) {
		/* again, rather annoying libarchive limitation: you can't extract or
		 * read entry anymore if it had been extracted/read before.
		 * */
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		RETURN_FALSE;
	}

	result = archive_read_extract(arch->arch, arch->current_entry->entry, flags);

	/* Anything other than OK/EOF is a real failure; prefer libarchive's
	 * own errno/message when it provides one. */
	if (result && result != ARCHIVE_EOF) {
		error_num = archive_errno(arch->arch);
		error_string = archive_error_string(arch->arch);

		if (error_num && error_string) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to extract entry: error #%d, %s", error_num, error_string);
		}
		else {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to extract entry: unknown error %d", result);
		}
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
	RETURN_TRUE;
}
/* Extracts the downloaded archive and removes it upon success. * Assumed to be in destination directory before calling this. * Returns -1 on fatal errors, > 0 on extraction errors, 0 on success. */ int extract_file(const char *filename) { /* Extract the archive */ struct archive *archive; struct archive_entry *entry; int ret; int errors = 0; int extract_flags = ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_TIME; archive = archive_read_new(); if (!archive) { return error(PW_ERR_ARCHIVE_CREATE); } archive_read_support_compression_all(archive); archive_read_support_format_all(archive); ret = archive_read_open_filename(archive, filename, 16384); if (ret != ARCHIVE_OK) { return error(PW_ERR_ARCHIVE_OPEN); } while (archive_read_next_header(archive, &entry) == ARCHIVE_OK) { ret = archive_read_extract(archive, entry, extract_flags); if (ret == ARCHIVE_WARN && archive_errno(archive) != ENOSPC) { pw_fprintf(PW_LOG_WARNING, stderr, "warning given when extracting %s: %s\n", archive_entry_pathname(entry), archive_error_string(archive)); } else if (ret != ARCHIVE_OK) { pw_fprintf(PW_LOG_ERROR, stderr, "Could not extract %s\n", archive_entry_pathname(entry)); ++errors; } if (config->verbose) { printf("X %s\n", archive_entry_pathname(entry)); } } archive_read_finish(archive); /* Everything successful. Remove the file */ unlink(filename); return errors; }
int do_extract_mtree(char *mtree, const char *prefix) { struct archive *a = NULL; struct archive_entry *ae; char path[MAXPATHLEN]; const char *fpath; int retcode = EPKG_OK; int ret; if (mtree == NULL || *mtree == '\0') return EPKG_OK; a = archive_read_new(); archive_read_support_filter_none(a); archive_read_support_format_mtree(a); if (archive_read_open_memory(a, mtree, strlen(mtree)) != ARCHIVE_OK) { pkg_emit_error("Fail to extract the mtree: %s", archive_error_string(a)); retcode = EPKG_FATAL; goto cleanup; } while ((ret = archive_read_next_header(a, &ae)) != ARCHIVE_EOF) { if (ret != ARCHIVE_OK) { pkg_emit_error("Skipping unsupported mtree line: %s", archive_error_string(a)); continue; } fpath = archive_entry_pathname(ae); if (*fpath != '/') { snprintf(path, sizeof(path), "%s/%s", prefix, fpath); archive_entry_set_pathname(ae, path); } /* Ignored failed extraction on purpose */ archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS); } cleanup: if (a != NULL) archive_read_free(a); return (retcode); }
/*
 * Issue 185: Test a regression that got in between 2.6 and 2.7 that
 * broke extraction of Zip entries with length-at-end.
 *
 * The reference archive has a directory entry followed by one deflated
 * regular file; we verify header metadata for both and (when zlib is
 * available) extract the file under a new name and check its content.
 */
static void
test_compat_zip_3(void)
{
	const char *refname = "test_compat_zip_3.zip";
	struct archive_entry *ae;
	struct archive *a;

	extract_reference_file(refname);
	assert((a = archive_read_new()) != NULL);
	assertEqualIntA(a, ARCHIVE_OK, archive_read_support_filter_all(a));
	assertEqualIntA(a, ARCHIVE_OK, archive_read_support_format_all(a));
	assertEqualIntA(a, ARCHIVE_OK, archive_read_open_filename(a, refname, 10240));

	/* First entry. */
	assertEqualIntA(a, ARCHIVE_OK, archive_read_next_header(a, &ae));
	assertEqualString("soapui-4.0.0/", archive_entry_pathname(ae));
	assertEqualInt(0, archive_entry_size(ae));
	assert(archive_entry_size_is_set(ae));
	assertEqualInt(AE_IFDIR, archive_entry_filetype(ae));

	/* Second entry. */
	assertEqualIntA(a, ARCHIVE_OK, archive_read_next_header(a, &ae));
	assertEqualString("soapui-4.0.0/soapui-settings.xml", archive_entry_pathname(ae));
	assertEqualInt(AE_IFREG, archive_entry_filetype(ae));
	assertEqualInt(1030, archive_entry_size(ae));
	assert(archive_entry_size_is_set(ae));

	/* Extract under a different name. */
	archive_entry_set_pathname(ae, "test_3.txt");
	if(libz_enabled) {
		char *p;
		size_t s;
		assertEqualIntA(a, ARCHIVE_OK, archive_read_extract(a, ae, 0));
		/* Verify the first 12 bytes actually got written to disk correctly. */
		p = slurpfile(&s, "test_3.txt");
		assertEqualInt(s, 1030);
		assertEqualMem(p, "<?xml versio", 12);
		free(p);
	} else {
		skipping("Skipping ZIP compression check, no libz support");
	}

	assertEqualIntA(a, ARCHIVE_EOF, archive_read_next_header(a, &ae));
	assertEqualIntA(a, ARCHIVE_OK, archive_read_close(a));
	assertEqualIntA(a, ARCHIVE_OK, archive_read_free(a));
}
/*
 * Extract every entry of handle->archive into directory `path`.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: the original returned straight from inside the loop on an
 * extraction error, leaving the process chdir'd into `path`; the
 * original working directory is now restored on every exit path
 * after the initial chdir succeeds.
 */
extern int lparchive_extract(lparchive_t *handle, char *path)
{
	char cwd[1024];
	struct archive_entry *entry;
	struct archive *archive = handle->archive;
	int status = 0;

	if ( getcwd(cwd, sizeof(cwd)) == NULL )
		return -1;
	if ( chdir(path) == -1 )
		return -1;

	while ( archive_read_next_header(archive, &entry) == ARCHIVE_OK ) {
		if ( archive_read_extract(archive, entry, 0) != ARCHIVE_OK ) {
			status = -1;
			break;
		}
	}

	/* Always try to return to the original directory. */
	if ( chdir(cwd) == -1 )
		status = -1;

	return status;
}
/*
 * Extract all files of a package archive, handling in-place upgrades
 * and configuration-file merging.
 *
 * Assumes the caller already read the first header into `ae`. For an
 * existing file, the new file is first extracted under a random
 * temporary suffix and renamed into place afterwards (crash safety).
 * Config files may be merged with the locally modified copy or saved
 * as "<name>.pkgnew". Returns EPKG_OK or EPKG_FATAL.
 */
static int
do_extract(struct archive *a, struct archive_entry *ae, const char *location,
    int nfiles, struct pkg *pkg, struct pkg *local)
{
	int	retcode = EPKG_OK;
	int	ret = 0, cur_file = 0;
	char	path[MAXPATHLEN], pathname[MAXPATHLEN], rpath[MAXPATHLEN];
	struct stat st;
	const struct stat *aest;
	bool renamed = false;
	const struct pkg_file *rf;
	struct pkg_config_file *rcf;
	struct sbuf *newconf;
	bool automerge = pkg_object_bool(pkg_config_get("AUTOMERGE"));
	unsigned long set, clear;

#ifndef HAVE_ARC4RANDOM
	/* Seed rand() for pkg_add_file_random_suffix() below. */
	srand(time(NULL));
#endif

	if (nfiles == 0)
		return (EPKG_OK);

	pkg_emit_extract_begin(pkg);
	pkg_emit_progress_start(NULL);

	newconf = sbuf_new_auto();

	do {
		ret = ARCHIVE_OK;
		sbuf_clear(newconf);
		rf = NULL;
		rcf = NULL;
		/* Compose the final on-disk path: optional location prefix
		 * plus the normalized entry path. */
		pkg_absolutepath(archive_entry_pathname(ae), path, sizeof(path));
		snprintf(pathname, sizeof(pathname), "%s%s%s",
		    location ? location : "",
		    *path == '/' ? "" : "/",
		    path
		);
		/* rpath is where we actually extract; may get a random
		 * suffix below when the target already exists. */
		strlcpy(rpath, pathname, sizeof(rpath));

		aest = archive_entry_stat(ae);
		archive_entry_fflags(ae, &set, &clear);
		if (lstat(rpath, &st) != -1) {
			/*
			 * We have an existing file on the path, so handle it
			 */
			if (!S_ISDIR(aest->st_mode)) {
				pkg_debug(2, "Old version found, renaming");
				pkg_add_file_random_suffix(rpath, sizeof(rpath), 12);
				renamed = true;
			}

			if (!S_ISDIR(st.st_mode) && S_ISDIR(aest->st_mode)) {
				if (S_ISLNK(st.st_mode)) {
					if (stat(rpath, &st) == -1) {
						pkg_emit_error("Dead symlink %s", rpath);
					} else {
						pkg_debug(2, "Directory is a symlink, use it");
						pkg_emit_progress_tick(cur_file++, nfiles);
						continue;
					}
				}
			}
		}

		archive_entry_set_pathname(ae, rpath);

		/* load in memory the content of config files */
		if (pkg_is_config_file(pkg, path, &rf, &rcf)) {
			pkg_debug(1, "Populating config_file %s", pathname);
			size_t len = archive_entry_size(ae);
			/* NOTE(review): malloc result is unchecked and the
			 * buffer is not NUL-terminated although it is later
			 * passed to sbuf_cat() as a string — verify upstream
			 * guarantees a trailing NUL in the archive data. */
			rcf->content = malloc(len);
			archive_read_data(a, rcf->content, len);
			if (renamed && (!automerge || local == NULL))
				strlcat(pathname, ".pkgnew", sizeof(pathname));
		}

		/*
		 * check if the file is already provided by previous package
		 */
		if (!automerge)
			attempt_to_merge(renamed, rcf, local, pathname, path, newconf);

		if (sbuf_len(newconf) == 0 && (rcf == NULL || rcf->content == NULL)) {
			/* Regular file/dir: extract directly from the archive. */
			pkg_debug(1, "Extracting: %s", archive_entry_pathname(ae));
			int install_as_user = (getenv("INSTALL_AS_USER") != NULL);
			int extract_flags = EXTRACT_ARCHIVE_FLAGS;
			if (install_as_user) {
				/* when installing as user don't try to set file ownership */
				extract_flags &= ~ARCHIVE_EXTRACT_OWNER;
			}
			ret = archive_read_extract(a, ae, extract_flags);
		} else {
			/* Config file: write merged (or raw) content ourselves. */
			if (sbuf_len(newconf) == 0) {
				sbuf_cat(newconf, rcf->content);
				sbuf_finish(newconf);
			}
			pkg_debug(2, "Writing conf in %s", pathname);
			unlink(rpath);
			/* NOTE(review): fopen result is not checked — a NULL
			 * here would crash in fprintf; confirm rpath is always
			 * creatable at this point. */
			FILE *f = fopen(rpath, "w+");
			fprintf(f, "%s", sbuf_data(newconf));
			fclose(f);
		}

		if (ret != ARCHIVE_OK) {
			/*
			 * show error except when the failure is during
			 * extracting a directory and that the directory already
			 * exists.
			 * this allow to install packages linux_base from
			 * package for example
			 */
			if (archive_entry_filetype(ae) != AE_IFDIR ||
			    !is_dir(pathname)) {
				pkg_emit_error("archive_read_extract(): %s",
				    archive_error_string(a));
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		/* Reapply modes to the directories to work around a problem on FreeBSD 9 */
		if (archive_entry_filetype(ae) == AE_IFDIR)
			chmod(pathname, aest->st_mode);

		pkg_emit_progress_tick(cur_file++, nfiles);

		/* Rename old file */
		if (renamed) {

			pkg_debug(1, "Renaming %s -> %s", rpath, pathname);
#ifdef HAVE_CHFLAGS
			bool old = false;
			/* Immutable-style flags block rename; drop them first. */
			if (set & NOCHANGESFLAGS)
				chflags(rpath, 0);

			if (lstat(pathname, &st) != -1) {
				old = true;
				if (st.st_flags & NOCHANGESFLAGS)
					chflags(pathname, 0);
			}
#endif

			if (rename(rpath, pathname) == -1) {
#ifdef HAVE_CHFLAGS
				/* restore flags */
				if (old)
					chflags(pathname, st.st_flags);
#endif
				pkg_emit_error("cannot rename %s to %s: %s", rpath, pathname,
					strerror(errno));
				retcode = EPKG_FATAL;
				goto cleanup;
			}
#ifdef HAVE_CHFLAGS
			/* Restore flags */
			chflags(pathname, set);
#endif
		}

		if (string_end_with(pathname, ".pkgnew"))
			pkg_emit_notice("New configuration file: %s", pathname);

		renamed = false;
	} while ((ret = archive_read_next_header(a, &ae)) == ARCHIVE_OK);

	if (ret != ARCHIVE_EOF) {
		pkg_emit_error("archive_read_next_header(): %s",
		    archive_error_string(a));
		retcode = EPKG_FATAL;
	}

cleanup:

	pkg_emit_progress_tick(nfiles, nfiles);
	pkg_emit_extract_finished(pkg);

	/* On failure, remove the half-installed temporary file. */
	if (renamed && retcode == EPKG_FATAL) {
#ifdef HAVE_CHFLAGS
		if (set & NOCHANGESFLAGS)
			chflags(rpath, set & ~NOCHANGESFLAGS);
#endif
		unlink(rpath);
	}

	return (retcode);
}
bool Drumkit::install( const QString& path ) { _INFOLOG( QString( "Install drumkit %1" ).arg( path ) ); #ifdef H2CORE_HAVE_LIBARCHIVE int r; struct archive* arch; struct archive_entry* entry; arch = archive_read_new(); #if ARCHIVE_VERSION_NUMBER < 3000000 archive_read_support_compression_all( arch ); #else archive_read_support_filter_all( arch ); #endif archive_read_support_format_all( arch ); #if ARCHIVE_VERSION_NUMBER < 3000000 if ( ( r = archive_read_open_file( arch, path.toLocal8Bit(), 10240 ) ) ) { #else if ( ( r = archive_read_open_filename( arch, path.toLocal8Bit(), 10240 ) ) ) { #endif _ERRORLOG( QString( "archive_read_open_file() [%1] %2" ).arg( archive_errno( arch ) ).arg( archive_error_string( arch ) ) ); archive_read_close( arch ); #if ARCHIVE_VERSION_NUMBER < 3000000 archive_read_finish( arch ); #else archive_read_free( arch ); #endif return false; } bool ret = true; QString dk_dir = Filesystem::usr_drumkits_dir() + "/"; while ( ( r = archive_read_next_header( arch, &entry ) ) != ARCHIVE_EOF ) { if ( r != ARCHIVE_OK ) { _ERRORLOG( QString( "archive_read_next_header() [%1] %2" ).arg( archive_errno( arch ) ).arg( archive_error_string( arch ) ) ); ret = false; break; } QString np = dk_dir + archive_entry_pathname( entry ); QByteArray newpath = np.toLocal8Bit(); archive_entry_set_pathname( entry, newpath.data() ); r = archive_read_extract( arch, entry, 0 ); if ( r == ARCHIVE_WARN ) { _WARNINGLOG( QString( "archive_read_extract() [%1] %2" ).arg( archive_errno( arch ) ).arg( archive_error_string( arch ) ) ); } else if ( r != ARCHIVE_OK ) { _ERRORLOG( QString( "archive_read_extract() [%1] %2" ).arg( archive_errno( arch ) ).arg( archive_error_string( arch ) ) ); ret = false; break; } } archive_read_close( arch ); #if ARCHIVE_VERSION_NUMBER < 3000000 archive_read_finish( arch ); #else archive_read_free( arch ); #endif return ret; #else // H2CORE_HAVE_LIBARCHIVE #ifndef WIN32 // GUNZIP QString gzd_name = path.left( path.indexOf( "." 
) ) + ".tar"; FILE* gzd_file = fopen( gzd_name.toLocal8Bit(), "wb" ); gzFile gzip_file = gzopen( path.toLocal8Bit(), "rb" ); if ( !gzip_file ) { _ERRORLOG( QString( "Error reading drumkit file: %1" ).arg( path ) ); gzclose( gzip_file ); fclose( gzd_file ); return false; } uchar buf[4096]; while ( gzread( gzip_file, buf, 4096 ) > 0 ) { fwrite( buf, sizeof( uchar ), 4096, gzd_file ); } gzclose( gzip_file ); fclose( gzd_file ); // UNTAR TAR* tar_file; QByteArray tar_path = gzd_name.toLocal8Bit(); if ( tar_open( &tar_file, tar_path.data(), NULL, O_RDONLY, 0, TAR_GNU ) == -1 ) { _ERRORLOG( QString( "tar_open(): %1" ).arg( QString::fromLocal8Bit( strerror( errno ) ) ) ); return false; } bool ret = true; char dst_dir[1024]; QString dk_dir = Filesystem::usr_drumkits_dir() + "/"; strncpy( dst_dir, dk_dir.toLocal8Bit(), 1024 ); if ( tar_extract_all( tar_file, dst_dir ) != 0 ) { _ERRORLOG( QString( "tar_extract_all(): %1" ).arg( QString::fromLocal8Bit( strerror( errno ) ) ) ); ret = false; } if ( tar_close( tar_file ) != 0 ) { _ERRORLOG( QString( "tar_close(): %1" ).arg( QString::fromLocal8Bit( strerror( errno ) ) ) ); ret = false; } return ret; #else // WIN32 _ERRORLOG( "WIN32 NOT IMPLEMENTED" ); return false; #endif #endif } };
/*
 * Build (or update) the repo.sqlite catalogue for the package
 * directory `path`.
 *
 * An existing repo.txz pack, if present, is unpacked first so the run
 * is incremental. Package files are parsed by a pool of worker
 * threads (one per CPU, fallback 6) feeding results through the
 * shared thd_data queue; this thread drains the queue and inserts
 * each package plus its deps/categories/licenses/options/shlibs into
 * sqlite. `progress`, when non-NULL, is invoked per inserted package.
 * Returns EPKG_OK or EPKG_FATAL.
 */
int
pkg_create_repo(char *path, bool force, void (progress)(struct pkg *pkg, void *data), void *data)
{
	FTS *fts = NULL;
	struct thd_data thd_data;
	int num_workers;
	size_t len;
	pthread_t *tids = NULL;

	struct pkg_dep *dep = NULL;
	struct pkg_category *category = NULL;
	struct pkg_license *license = NULL;
	struct pkg_option *option = NULL;
	struct pkg_shlib *shlib = NULL;

	sqlite3 *sqlite = NULL;

	int64_t package_id;
	char *errmsg = NULL;
	int retcode = EPKG_OK;
	int ret;

	char *repopath[2];
	char repodb[MAXPATHLEN + 1];
	char repopack[MAXPATHLEN + 1];

	struct archive *a = NULL;
	struct archive_entry *ae = NULL;

	if (!is_dir(path)) {
		pkg_emit_error("%s is not a directory", path);
		return (EPKG_FATAL);
	}

	repopath[0] = path;
	repopath[1] = NULL;

	/* One worker per CPU; arbitrary fallback when sysctl fails. */
	len = sizeof(num_workers);
	if (sysctlbyname("hw.ncpu", &num_workers, &len, NULL, 0) == -1)
		num_workers = 6;

	if ((fts = fts_open(repopath, FTS_PHYSICAL|FTS_NOCHDIR, NULL)) == NULL) {
		pkg_emit_errno("fts_open", path);
		retcode = EPKG_FATAL;
		goto cleanup;
	}

	snprintf(repodb, sizeof(repodb), "%s/repo.sqlite", path);
	snprintf(repopack, sizeof(repopack), "%s/repo.txz", path);

	/* Unpack any existing packed catalogue so we update incrementally. */
	if (access(repopack, F_OK) == 0) {
		a = archive_read_new();
		archive_read_support_compression_all(a);
		archive_read_support_format_tar(a);

		ret = archive_read_open_filename(a, repopack, 4096);
		if (ret != ARCHIVE_OK) {
			/* if we can't unpack it it won't be useful for us */
			unlink(repopack);
		} else {
			while (archive_read_next_header(a, &ae) == ARCHIVE_OK) {
				if (!strcmp(archive_entry_pathname(ae), "repo.sqlite")) {
					archive_entry_set_pathname(ae, repodb);
					archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS);
					break;
				}
			}
		}
		if (a != NULL)
			archive_read_finish(a);
	}

	if ((retcode = initialize_repo(repodb, force, &sqlite)) != EPKG_OK)
		goto cleanup;
	if ((retcode = initialize_prepared_statements(sqlite)) != EPKG_OK)
		goto cleanup;

	/* Shared state for the producer threads. */
	thd_data.root_path = path;
	thd_data.max_results = num_workers;
	thd_data.num_results = 0;
	thd_data.stop = false;
	thd_data.fts = fts;
	pthread_mutex_init(&thd_data.fts_m, NULL);
	STAILQ_INIT(&thd_data.results);
	thd_data.thd_finished = 0;
	pthread_mutex_init(&thd_data.results_m, NULL);
	pthread_cond_init(&thd_data.has_result, NULL);
	pthread_cond_init(&thd_data.has_room, NULL);

	/* Launch workers */
	tids = calloc(num_workers, sizeof(pthread_t));
	for (int i = 0; i < num_workers; i++) {
		pthread_create(&tids[i], NULL, (void *)&read_pkg_file, &thd_data);
	}

	for (;;) {
		struct pkg_result *r;

		const char *name, *version, *origin, *comment, *desc;
		const char *arch, *maintainer, *www, *prefix;
		int64_t flatsize;
		lic_t licenselogic;

		/* Pop one result; r == NULL after the loop means all
		 * workers are done and the queue is drained. */
		pthread_mutex_lock(&thd_data.results_m);
		while ((r = STAILQ_FIRST(&thd_data.results)) == NULL) {
			if (thd_data.thd_finished == num_workers) {
				break;
			}
			pthread_cond_wait(&thd_data.has_result, &thd_data.results_m);
		}
		if (r != NULL) {
			STAILQ_REMOVE_HEAD(&thd_data.results, next);
			thd_data.num_results--;
			pthread_cond_signal(&thd_data.has_room);
		}
		pthread_mutex_unlock(&thd_data.results_m);
		if (r == NULL) {
			break;
		}

		if (r->retcode != EPKG_OK) {
			continue;
		}

		/* do not add if package if already in repodb (possibly at a different pkg_path) */
		if (run_prepared_statement(EXISTS, r->cksum) != SQLITE_ROW) {
			ERROR_SQLITE(sqlite);
			/* NOTE(review): this path jumps to cleanup without
			 * setting retcode = EPKG_FATAL, so the function can
			 * report success after an sqlite error — verify. */
			goto cleanup;
		}
		if (sqlite3_column_int(STMT(EXISTS), 0) > 0) {
			continue;
		}

		if (progress != NULL)
			progress(r->pkg, data);

		pkg_get(r->pkg, PKG_ORIGIN, &origin, PKG_NAME, &name,
		    PKG_VERSION, &version, PKG_COMMENT, &comment,
		    PKG_DESC, &desc, PKG_ARCH, &arch,
		    PKG_MAINTAINER, &maintainer, PKG_WWW, &www,
		    PKG_PREFIX, &prefix, PKG_FLATSIZE, &flatsize,
		    PKG_LICENSE_LOGIC, &licenselogic);

	try_again:
		if ((ret = run_prepared_statement(PKG, origin, name, version,
		    comment, desc, arch, maintainer, www, prefix,
		    r->size, flatsize, (int64_t)licenselogic, r->cksum,
		    r->path)) != SQLITE_DONE) {
			if (ret == SQLITE_CONSTRAINT) {
				/* Same origin already present: keep whichever
				 * version is newer, then retry the insert. */
				switch(maybe_delete_conflicting(origin,
				    version, r->path)) {
				case EPKG_FATAL: /* sqlite error */
					ERROR_SQLITE(sqlite);
					retcode = EPKG_FATAL;
					goto cleanup;
					break;
				case EPKG_END: /* repo already has newer */
					continue;
					break;
				default: /* conflict cleared, try again */
					goto try_again;
					break;
				}
			} else {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		package_id = sqlite3_last_insert_rowid(sqlite);

		dep = NULL;
		while (pkg_deps(r->pkg, &dep) == EPKG_OK) {
			if (run_prepared_statement(DEPS,
			    pkg_dep_origin(dep),
			    pkg_dep_name(dep),
			    pkg_dep_version(dep),
			    package_id) != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		category = NULL;
		while (pkg_categories(r->pkg, &category) == EPKG_OK) {
			const char *cat_name = pkg_category_name(category);

			ret = run_prepared_statement(CAT1, cat_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(CAT2, package_id,
				    cat_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		license = NULL;
		while (pkg_licenses(r->pkg, &license) == EPKG_OK) {
			const char *lic_name = pkg_license_name(license);

			ret = run_prepared_statement(LIC1, lic_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(LIC2, package_id,
				    lic_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		option = NULL;
		while (pkg_options(r->pkg, &option) == EPKG_OK) {
			if (run_prepared_statement(OPTS,
			    pkg_option_opt(option),
			    pkg_option_value(option),
			    package_id) != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		shlib = NULL;
		while (pkg_shlibs(r->pkg, &shlib) == EPKG_OK) {
			const char *shlib_name = pkg_shlib_name(shlib);

			ret = run_prepared_statement(SHLIB1, shlib_name);
			if (ret == SQLITE_DONE)
				ret = run_prepared_statement(SHLIB2, package_id,
				    shlib_name);
			if (ret != SQLITE_DONE) {
				ERROR_SQLITE(sqlite);
				retcode = EPKG_FATAL;
				goto cleanup;
			}
		}

		pkg_free(r->pkg);
		free(r);
	}

	if (pkgdb_transaction_commit(sqlite, NULL) != SQLITE_OK)
		retcode = EPKG_FATAL;

cleanup:
	if (tids != NULL) {
		// Cancel running threads
		if (retcode != EPKG_OK) {
			pthread_mutex_lock(&thd_data.fts_m);
			thd_data.stop = true;
			pthread_mutex_unlock(&thd_data.fts_m);
		}
		// Join on threads to release thread IDs
		for (int i = 0; i < num_workers; i++) {
			pthread_join(tids[i], NULL);
		}
		free(tids);
	}

	if (fts != NULL)
		fts_close(fts);

	finalize_prepared_statements();

	if (sqlite != NULL)
		sqlite3_close(sqlite);

	if (errmsg != NULL)
		sqlite3_free(errmsg);

	sqlite3_shutdown();

	return (retcode);
}
/*
 * Handle 'x' and 't' modes.
 *
 * Opens the archive named on the command line (or stdin), then walks
 * every entry applying, in order: time-based filters, exclusion
 * patterns, and user pathname rewrites. In 't' mode entries are
 * listed; in 'x' mode they are extracted (or streamed to stdout with
 * -O). Failures set bsdtar->return_value rather than aborting, except
 * for ARCHIVE_FATAL which stops the walk.
 */
static void
read_archive(struct bsdtar *bsdtar, char mode)
{
	FILE *out;
	struct archive *a;
	struct archive_entry *entry;
	const struct stat *st;
	int r;

	/* Remaining argv words are include patterns. */
	while (*bsdtar->argv) {
		include(bsdtar, *bsdtar->argv);
		bsdtar->argv++;
	}

	if (bsdtar->names_from_file != NULL)
		include_from_file(bsdtar, bsdtar->names_from_file);

	a = archive_read_new();
	if (bsdtar->compress_program != NULL)
		archive_read_support_compression_program(a, bsdtar->compress_program);
	else
		archive_read_support_compression_all(a);
	archive_read_support_format_all(a);
	if (archive_read_open_file(a, bsdtar->filename,
	    bsdtar->bytes_per_block != 0 ? bsdtar->bytes_per_block :
	    DEFAULT_BYTES_PER_BLOCK))
		bsdtar_errc(bsdtar, 1, 0, "Error opening archive: %s",
		    archive_error_string(a));

	do_chdir(bsdtar);
	for (;;) {
		/* Support --fast-read option */
		if (bsdtar->option_fast_read &&
		    unmatched_inclusions(bsdtar) == 0)
			break;

		r = archive_read_next_header(a, &entry);
		if (r == ARCHIVE_EOF)
			break;
		if (r < ARCHIVE_OK)
			bsdtar_warnc(bsdtar, 0, "%s", archive_error_string(a));
		if (r <= ARCHIVE_WARN)
			bsdtar->return_value = 1;
		if (r == ARCHIVE_RETRY) {
			/* Retryable error: try again */
			bsdtar_warnc(bsdtar, 0, "Retrying...");
			continue;
		}
		if (r == ARCHIVE_FATAL)
			break;

		/*
		 * Exclude entries that are too old.
		 */
		st = archive_entry_stat(entry);
		if (bsdtar->newer_ctime_sec > 0) {
			if (st->st_ctime < bsdtar->newer_ctime_sec)
				continue; /* Too old, skip it. */
			if (st->st_ctime == bsdtar->newer_ctime_sec
			    && ARCHIVE_STAT_CTIME_NANOS(st)
			    <= bsdtar->newer_ctime_nsec)
				continue; /* Too old, skip it. */
		}
		if (bsdtar->newer_mtime_sec > 0) {
			if (st->st_mtime < bsdtar->newer_mtime_sec)
				continue; /* Too old, skip it. */
			if (st->st_mtime == bsdtar->newer_mtime_sec
			    && ARCHIVE_STAT_MTIME_NANOS(st)
			    <= bsdtar->newer_mtime_nsec)
				continue; /* Too old, skip it. */
		}

		/*
		 * Note that pattern exclusions are checked before
		 * pathname rewrites are handled.  This gives more
		 * control over exclusions, since rewrites always lose
		 * information.  (For example, consider a rewrite
		 * s/foo[0-9]/foo/.  If we check exclusions after the
		 * rewrite, there would be no way to exclude foo1/bar
		 * while allowing foo2/bar.)
		 */
		if (excluded(bsdtar, archive_entry_pathname(entry)))
			continue; /* Excluded by a pattern test. */

		/*
		 * Modify the pathname as requested by the user.  We
		 * do this for -t as well to give users a way to
		 * preview the effects of their rewrites.  We also do
		 * this before extraction security checks (including
		 * leading '/' removal).  Note that some rewrite
		 * failures prevent extraction.
		 */
		if (edit_pathname(bsdtar, entry))
			continue; /* Excluded by a rewrite failure. */

		if (mode == 't') {
			/* Perversely, gtar uses -O to mean "send to stderr"
			 * when used with -t. */
			out = bsdtar->option_stdout ? stderr : stdout;

			if (bsdtar->verbose < 2)
				safe_fprintf(out, "%s",
				    archive_entry_pathname(entry));
			else
				list_item_verbose(bsdtar, out, entry);
			fflush(out);
			/* Skipping data keeps the (deferred) '\n' correct
			 * even when the skip itself reports a problem. */
			r = archive_read_data_skip(a);
			if (r == ARCHIVE_WARN) {
				fprintf(out, "\n");
				bsdtar_warnc(bsdtar, 0, "%s",
				    archive_error_string(a));
			}
			if (r == ARCHIVE_RETRY) {
				fprintf(out, "\n");
				bsdtar_warnc(bsdtar, 0, "%s",
				    archive_error_string(a));
			}
			if (r == ARCHIVE_FATAL) {
				fprintf(out, "\n");
				bsdtar_warnc(bsdtar, 0, "%s",
				    archive_error_string(a));
				bsdtar->return_value = 1;
				break;
			}
			fprintf(out, "\n");
		} else {
			if (bsdtar->option_interactive &&
			    !yes("extract '%s'", archive_entry_pathname(entry)))
				continue;

			/*
			 * Format here is from SUSv2, including the
			 * deferred '\n'.
			 */
			if (bsdtar->verbose) {
				safe_fprintf(stderr, "x %s",
				    archive_entry_pathname(entry));
				fflush(stderr);
			}
			if (bsdtar->option_stdout)
				r = archive_read_data_into_fd(a, 1);
			else
				r = archive_read_extract(a, entry,
				    bsdtar->extract_flags);
			if (r != ARCHIVE_OK) {
				if (!bsdtar->verbose)
					safe_fprintf(stderr, "%s",
					    archive_entry_pathname(entry));
				safe_fprintf(stderr, ": %s",
				    archive_error_string(a));
				if (!bsdtar->verbose)
					fprintf(stderr, "\n");
				bsdtar->return_value = 1;
			}
			if (bsdtar->verbose)
				fprintf(stderr, "\n");
			if (r == ARCHIVE_FATAL)
				break;
		}
	}

	if (bsdtar->verbose > 2)
		fprintf(stdout, "Archive Format: %s, Compression: %s\n",
		    archive_format_name(a), archive_compression_name(a));
	archive_read_finish(a);
}
/*
 * Handle read modes: 'x', 't' and 'p'.
 *
 * Walks the ar(1) archive named in bsdar->filename. Pseudo members
 * ("/", "//", "/SYM64/") are skipped. When member names were given on
 * the command line, only those are processed (matched by basename,
 * each consumed once). 't' lists (verbosely with -v), 'p' prints
 * member data to stdout, 'x' extracts to disk honoring -C/-u/-o.
 */
static void
read_archive(struct bsdar *bsdar, char mode)
{
	struct archive		 *a;
	struct archive_entry	 *entry;
	struct stat		  sb;
	struct tm		 *tp;
	const char		 *bname;
	const char		 *name;
	mode_t			  md;
	size_t			  size;
	time_t			  mtime;
	uid_t			  uid;
	gid_t			  gid;
	char			**av;
	char			  buf[25];
	char			  find;
	int			  flags, r, i;

	if ((a = archive_read_new()) == NULL)
		bsdar_errc(bsdar, EX_SOFTWARE, 0, "archive_read_new failed");
	archive_read_support_format_ar(a);
	AC(archive_read_open_filename(a, bsdar->filename, DEF_BLKSZ));

	for (;;) {
		r = archive_read_next_header(a, &entry);
		if (r == ARCHIVE_WARN || r == ARCHIVE_RETRY ||
		    r == ARCHIVE_FATAL)
			bsdar_warnc(bsdar, archive_errno(a), "%s",
			    archive_error_string(a));
		if (r == ARCHIVE_EOF || r == ARCHIVE_FATAL)
			break;
		if (r == ARCHIVE_RETRY) {
			bsdar_warnc(bsdar, 0, "Retrying...");
			continue;
		}

		if ((name = archive_entry_pathname(entry)) == NULL)
			break;

		/* Skip pseudo members. */
		if (strcmp(name, "/") == 0 || strcmp(name, "//") == 0 ||
		    strcmp(name, "/SYM64/") == 0)
			continue;

		/* Restrict to members named on the command line, if any;
		 * each argv slot is NULLed once matched so duplicates in
		 * the archive are processed only once. */
		if (bsdar->argc > 0) {
			find = 0;
			for(i = 0; i < bsdar->argc; i++) {
				av = &bsdar->argv[i];
				if (*av == NULL)
					continue;
				if ((bname = basename(*av)) == NULL)
					bsdar_errc(bsdar, EX_SOFTWARE, errno,
					    "basename failed");
				if (strcmp(bname, name) != 0)
					continue;

				*av = NULL;
				find = 1;
				break;
			}
			if (!find)
				continue;
		}

		if (mode == 't') {
			if (bsdar->options & AR_V) {
				md = archive_entry_mode(entry);
				uid = archive_entry_uid(entry);
				gid = archive_entry_gid(entry);
				size = archive_entry_size(entry);
				mtime = archive_entry_mtime(entry);
				(void)strmode(md, buf);
				/* buf + 1 drops strmode's file-type char. */
				(void)fprintf(stdout, "%s %6d/%-6d %8ju ",
				    buf + 1, uid, gid, (uintmax_t)size);
				tp = localtime(&mtime);
				(void)strftime(buf, sizeof(buf),
				    "%b %e %H:%M %Y", tp);
				(void)fprintf(stdout, "%s %s", buf, name);
			} else
				(void)fprintf(stdout, "%s", name);
			r = archive_read_data_skip(a);
			if (r == ARCHIVE_WARN || r == ARCHIVE_RETRY ||
			    r == ARCHIVE_FATAL) {
				(void)fprintf(stdout, "\n");
				bsdar_warnc(bsdar, archive_errno(a), "%s",
				    archive_error_string(a));
			}

			if (r == ARCHIVE_FATAL)
				break;

			(void)fprintf(stdout, "\n");
		} else {
			/* mode == 'x' || mode = 'p' */
			if (mode == 'p') {
				if (bsdar->options & AR_V) {
					(void)fprintf(stdout, "\n<%s>\n\n",
					    name);
					fflush(stdout);
				}
				r = archive_read_data_into_fd(a, 1);
			} else {
				/* mode == 'x' */
				if (stat(name, &sb) != 0) {
					if (errno != ENOENT) {
						/* NOTE(review): message prints
						 * bsdar->filename although the
						 * failing stat() was on the
						 * member `name` — verify which
						 * was intended. */
						bsdar_warnc(bsdar, 0,
						    "stat %s failed",
						    bsdar->filename);
						continue;
					}
				} else {
					/* stat success, file exist */
					if (bsdar->options & AR_CC)
						continue;
					if (bsdar->options & AR_U &&
					    archive_entry_mtime(entry) <=
					    sb.st_mtime)
						continue;
				}

				if (bsdar->options & AR_V)
					(void)fprintf(stdout, "x - %s\n", name);
				/* Disallow absolute paths. */
				if (name[0] == '/') {
					bsdar_warnc(bsdar, 0,
					    "Absolute path '%s'", name);
					continue;
				}
				/* Basic path security flags. */
				flags = ARCHIVE_EXTRACT_SECURE_SYMLINKS |
				    ARCHIVE_EXTRACT_SECURE_NODOTDOT;
				if (bsdar->options & AR_O)
					flags |= ARCHIVE_EXTRACT_TIME;

				r = archive_read_extract(a, entry, flags);
			}

			if (r)
				bsdar_warnc(bsdar, archive_errno(a), "%s",
				    archive_error_string(a));
		}
	}
	AC(archive_read_close(a));
	AC(archive_read_free(a));
}
static int repo_archive_extract_file(int fd, const char *file, const char *dest, const char *repokey, int dest_fd) { struct archive *a = NULL; struct archive_entry *ae = NULL; unsigned char *sig = NULL; int siglen = 0, ret, rc = EPKG_OK; a = archive_read_new(); archive_read_support_filter_all(a); archive_read_support_format_tar(a); /* Seek to the begin of file */ (void)lseek(fd, 0, SEEK_SET); archive_read_open_fd(a, fd, 4096); while (archive_read_next_header(a, &ae) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(ae), file) == 0) { if (dest_fd == -1) { archive_entry_set_pathname(ae, dest); /* * The repo should be owned by root and not writable */ archive_entry_set_uid(ae, 0); archive_entry_set_gid(ae, 0); archive_entry_set_perm(ae, 0644); if (archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS) != 0) { pkg_emit_errno("archive_read_extract", "extract error"); rc = EPKG_FATAL; goto cleanup; } } else { if (archive_read_data_into_fd(a, dest_fd) != 0) { pkg_emit_errno("archive_read_extract", "extract error"); rc = EPKG_FATAL; goto cleanup; } (void)lseek(dest_fd, 0, SEEK_SET); } } if (strcmp(archive_entry_pathname(ae), "signature") == 0) { siglen = archive_entry_size(ae); sig = malloc(siglen); archive_read_data(a, sig, siglen); } } if (repokey != NULL) { if (sig != NULL) { ret = rsa_verify(dest, repokey, sig, siglen - 1, dest_fd); if (ret != EPKG_OK) { pkg_emit_error("Invalid signature, " "removing repository."); if (dest != NULL) unlink(dest); free(sig); rc = EPKG_FATAL; goto cleanup; } free(sig); } else { pkg_emit_error("No signature found in the repository. " "Can not validate against %s key.", repokey); rc = EPKG_FATAL; if (dest != NULL) unlink(dest); goto cleanup; } } cleanup: if (a != NULL) archive_read_free(a); return rc; }
/*
 * ai_utils_extract_archive:
 * @filename: path of the archive to extract
 * @directory: destination directory (must already exist)
 * @error: return location for a #GError, or %NULL
 *
 * Extracts an archive to a directory, fast.
 *
 * NOTE(review): extraction works by chdir()ing into @directory, so the
 * process-wide working directory changes while this runs; not safe to
 * call concurrently from multiple threads -- confirm callers.
 *
 * NOTE(review): archive_read_support_compression_all()/
 * archive_read_finish() are the pre-3.x libarchive names -- presumably
 * the project targets libarchive 2; verify before upgrading.
 *
 * Returns: %TRUE on success, %FALSE with @error set otherwise.
 */
gboolean
ai_utils_extract_archive (const gchar *filename, const gchar *directory, GError **error)
{
	gboolean ret = FALSE;
	struct archive *arch = NULL;
	struct archive_entry *entry;
	int r;
	int retval;
	gchar *retcwd;
	gchar buf[PATH_MAX];

	/* save the PWD as we chdir to extract */
	retcwd = getcwd (buf, PATH_MAX);
	if (retcwd == NULL) {
		/* error domain/code 1,0 are magic numbers used throughout
		 * this function -- presumably a local convention; verify */
		g_set_error_literal (error, 1, 0, "failed to get cwd");
		goto out;
	}

	/* we can only read tar archives */
	arch = archive_read_new ();
	archive_read_support_format_all (arch);
	archive_read_support_compression_all (arch);

	/* open the tar file */
	r = archive_read_open_file (arch, filename, BLOCK_SIZE);
	if (r) {
		g_set_error (error, 1, 0, "cannot open: %s", archive_error_string (arch));
		goto out;
	}

	/* switch to our destination directory */
	retval = chdir (directory);
	if (retval != 0) {
		g_set_error (error, 1, 0, "failed chdir to %s", directory);
		goto out;
	}

	/* decompress each file; any header or extract error aborts */
	for (;;) {
		r = archive_read_next_header (arch, &entry);
		if (r == ARCHIVE_EOF)
			break;
		if (r != ARCHIVE_OK) {
			g_set_error (error, 1, 0, "cannot read header: %s", archive_error_string (arch));
			goto out;
		}
		r = archive_read_extract (arch, entry, 0);
		if (r != ARCHIVE_OK) {
			g_set_error (error, 1, 0, "cannot extract: %s", archive_error_string (arch));
			goto out;
		}
	}

	/* completed all okay */
	ret = TRUE;
out:
	/* close the archive */
	if (arch != NULL) {
		archive_read_close (arch);
		archive_read_finish (arch);
	}

	/* switch back to PWD (buf still holds the saved cwd) */
	retval = chdir (buf);
	if (retval != 0)
		g_warning ("cannot chdir back!");

	return ret;
}
// Install the package archive `filename` into config->install_root.
//
// The archive is scanned three times with libarchive's forward-only
// reader:
//   pass 1: locate and parse the embedded .PKGINFO metadata;
//   pass 2: refuse to overwrite files that already exist on disk
//           (unless CAKE_INSERT_FORCE is given);
//   pass 3: extract the files and record every pathname in the
//           package database.
//
// `package` may be NULL, in which case a local package descriptor is
// synthesized from the .PKGINFO data.  Returns 0 on success, -1 on
// failure.  Note: as before, the process cwd is left inside
// install_root on most exit paths -- callers must not rely on the cwd.
int cake_insert(cake_config_t *config, cake_package_t *package, char *filename, int flags) {
    struct archive *archive;
    struct archive_entry *entry;
    struct stat st;
    char *pathname;
    char *orig_cwd = get_current_dir_name();
    pkginfo_t *pkg = NULL;

    // Pass 1: open archive and parse .PKGINFO
    archive = archive_read_new();
    archive_read_support_compression_all(archive);
    archive_read_support_format_tar(archive);
    archive_read_open_filename(archive, filename, 64);
    chdir(config->install_root);

    while (archive_read_next_header(archive, &entry) == ARCHIVE_OK) {
        pathname = archive_entry_pathname(entry);

        if (!strcmp(pathname, ".PKGINFO")) {
            int size = archive_entry_size(entry);
            char buffer[size];
            archive_read_data(archive, buffer, size);
            pkg = pkginfo_read_buffer(buffer);
        } else {
            archive_read_data_skip(archive);
        }
    }
    archive_read_finish(archive);

    // Check if we found pkginfo.  BUGFIX: test pkg for NULL *before*
    // querying it -- pkginfo_get(NULL, ...) was called when .PKGINFO
    // was missing.
    char *pkgname = (pkg == NULL) ? NULL : pkginfo_get(pkg, "pkgname");
    if (pkgname == NULL) {
        fputs("Invalid package, PKGINFO not found or invalid.\n", stderr);
        free(orig_cwd);  // BUGFIX: was leaked on every error path
        return -1;
    }

    // Build the package specifier
    cake_package_t *pkgdesc;
    if (package != NULL) {
        pkgdesc = package;
        if (pkgdesc->name == NULL)
            pkgdesc->name = pkgname;
        if (pkgdesc->branch == NULL)
            pkgdesc->branch = "__LOCAL__";
    } else {
        pkgdesc = malloc(sizeof(cake_package_t));
        pkgdesc->name = pkgname;
        pkgdesc->branch = "__LOCAL__";
        pkgdesc->server = cake_config_get_server(config, "__LOCAL__");
    }

    // Remove existing package
    if (cakedb_has(config, pkgdesc))
        cake_remove(config, pkgdesc, 0);

    // Pass 2: reopen archive and check for file collisions
    chdir(orig_cwd);
    archive = archive_read_new();
    archive_read_support_compression_all(archive);
    archive_read_support_format_tar(archive);
    archive_read_open_filename(archive, filename, 64);
    chdir(config->install_root);

    while (archive_read_next_header(archive, &entry) == ARCHIVE_OK) {
        pathname = archive_entry_pathname(entry);
        struct stat vst = *archive_entry_stat(entry);

        // Check file existence (directories may already exist)
        if (!(flags & CAKE_INSERT_FORCE) && !(vst.st_mode & S_IFDIR) &&
            stat(pathname, &st) == 0) {
            fprintf(stderr, "Error: file %s%s exists on disk.\n",
                    config->install_root, pathname);
            archive_read_finish(archive);  // BUGFIX: handle was leaked here
            free(orig_cwd);
            return -1;
        }
    }
    archive_read_finish(archive);

    // Holds all the installed filenames
    FILE *files = cakedb_open(config, pkgdesc, "files", "w");
    if (files == NULL) {
        fputs("Unable to create database entry, permission denied?\n", stderr);
        free(orig_cwd);
        return -1;
    }

    // Source origin for the package.  BUGFIX: the stream was used
    // without a NULL check.
    FILE *sourceloc = cakedb_open(config, pkgdesc, "origin", "w");
    if (sourceloc == NULL) {
        fputs("Unable to create database entry, permission denied?\n", stderr);
        fclose(files);
        free(orig_cwd);
        return -1;
    }
    fprintf(sourceloc, "%s\n%s\n%s\n", pkgdesc->name, pkgdesc->branch,
            pkgdesc->server == NULL ? "__LOCAL__" : pkgdesc->server->name);
    fclose(sourceloc);

    // Pass 3: reopen archive and extract
    chdir(orig_cwd);
    archive = archive_read_new();
    archive_read_support_compression_all(archive);
    archive_read_support_format_tar(archive);
    archive_read_open_filename(archive, filename, 64);
    chdir(config->install_root);

    while (archive_read_next_header(archive, &entry) == ARCHIVE_OK) {
        // Copy the name: it must outlive archive_entry_set_pathname below.
        pathname = strdup(archive_entry_pathname(entry));

        // Write PKGINFO into the package database
        if (!strcmp(pathname, ".PKGINFO")) {
            FILE *pkginfo = cakedb_open(config, pkgdesc, "pkginfo", "w");
            if (pkginfo != NULL) {
                archive_read_data_into_fd(archive, fileno(pkginfo));
                fclose(pkginfo);  // BUGFIX: stream was leaked
            } else {
                archive_read_data_skip(archive);
            }
            free(pathname);
            continue;
        }

        // Ignore root dotfiles
        if (pathname[0] == '.') {
            archive_read_data_skip(archive);
            free(pathname);
            continue;
        }

        // Move /etc to /usr/etc unless configs are installed in place
        if (!strncmp(pathname, "etc/", 4)) {
            if (flags & CAKE_INSERT_INSTALL_CONFIGS) {
                // Verbose output
                if (flags & CAKE_INSERT_VERBOSE &&
                    pathname[strlen(pathname)-1] != '/')
                    printf("Installing configuration %s\n", pathname);
            } else {
                char *newpath = malloc(strlen(pathname)+5);
                sprintf(newpath, "usr/%s", pathname);
                archive_entry_set_pathname(entry, newpath);
                free(newpath);  // BUGFIX: libarchive copies the string; was leaked
            }
        } else {
            // Verbose output
            if (flags & CAKE_INSERT_VERBOSE_VERBOSE)
                printf("Extracting %s\n", pathname);
        }

        archive_read_extract(archive, entry,
                             ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_TIME |
                             ARCHIVE_EXTRACT_ACL | ARCHIVE_EXTRACT_XATTR |
                             ARCHIVE_EXTRACT_UNLINK);

        // The database records the original (pre-relocation) pathname.
        fputs(pathname, files);
        fputc('\n', files);
        free(pathname);  // BUGFIX: strdup'd name was leaked once per entry
    }

    // Close archive and database
    fclose(files);
    archive_read_finish(archive);
    free(orig_cwd);
    return 0;
}
/*
 * Handle 'x' and 't' modes.
 *
 * Builds the inclusion list from argv / --names-from-file, opens the
 * archive (honoring --use-compress-program, -b block size and -o
 * options), then iterates entries: 't' lists them, 'x' extracts them
 * (or streams to fd 1 with -O).  Entries can be skipped by the
 * --newer* time filters, exclusion patterns, pathname-rewrite
 * failures, or interactive confirmation; uid/gid/uname/gname
 * overrides are applied before extraction.  Fatal archive errors and
 * --fast-read completion end the loop early.
 */
static void
read_archive(struct bsdtar *bsdtar, char mode)
{
	struct progress_data progress_data;
	FILE *out;
	struct archive *a;
	struct archive_entry *entry;
	const struct stat *st;
	int r;

	while (*bsdtar->argv) {
		lafe_include(&bsdtar->matching, *bsdtar->argv);
		bsdtar->argv++;
	}

	if (bsdtar->names_from_file != NULL)
		lafe_include_from_file(&bsdtar->matching,
		    bsdtar->names_from_file, bsdtar->option_null);

	a = archive_read_new();
	if (bsdtar->compress_program != NULL)
		archive_read_support_compression_program(a, bsdtar->compress_program);
	else
		archive_read_support_compression_all(a);
	archive_read_support_format_all(a);

	if (ARCHIVE_OK != archive_read_set_options(a, bsdtar->option_options))
		lafe_errc(1, 0, "%s", archive_error_string(a));
	if (archive_read_open_file(a, bsdtar->filename,
	    bsdtar->bytes_per_block != 0 ? bsdtar->bytes_per_block :
	    DEFAULT_BYTES_PER_BLOCK))
		lafe_errc(1, 0, "Error opening archive: %s",
		    archive_error_string(a));

	do_chdir(bsdtar);

	if (mode == 'x') {
		/* Set an extract callback so that we can handle SIGINFO. */
		progress_data.bsdtar = bsdtar;
		progress_data.archive = a;
		archive_read_extract_set_progress_callback(a, progress_func,
		    &progress_data);
	}

	if (mode == 'x' && bsdtar->option_chroot) {
#if HAVE_CHROOT
		if (chroot(".") != 0)
			lafe_errc(1, errno, "Can't chroot to \".\"");
#else
		lafe_errc(1, 0, "chroot isn't supported on this platform");
#endif
	}

	for (;;) {
		/* Support --fast-read option */
		if (bsdtar->option_fast_read &&
		    lafe_unmatched_inclusions(bsdtar->matching) == 0)
			break;

		r = archive_read_next_header(a, &entry);
		progress_data.entry = entry;
		if (r == ARCHIVE_EOF)
			break;
		if (r < ARCHIVE_OK)
			lafe_warnc(0, "%s", archive_error_string(a));
		if (r <= ARCHIVE_WARN)
			bsdtar->return_value = 1;
		if (r == ARCHIVE_RETRY) {
			/* Retryable error: try again */
			lafe_warnc(0, "Retrying...");
			continue;
		}
		if (r == ARCHIVE_FATAL)
			break;

		/* Apply any --uid/--gid/--uname/--gname overrides. */
		if (bsdtar->uid >= 0) {
			archive_entry_set_uid(entry, bsdtar->uid);
			archive_entry_set_uname(entry, NULL);
		}
		if (bsdtar->gid >= 0) {
			archive_entry_set_gid(entry, bsdtar->gid);
			archive_entry_set_gname(entry, NULL);
		}
		if (bsdtar->uname)
			archive_entry_set_uname(entry, bsdtar->uname);
		if (bsdtar->gname)
			archive_entry_set_gname(entry, bsdtar->gname);

		/*
		 * Exclude entries that are too old.
		 */
		st = archive_entry_stat(entry);
		if (bsdtar->newer_ctime_sec > 0) {
			if (st->st_ctime < bsdtar->newer_ctime_sec)
				continue; /* Too old, skip it. */
			if (st->st_ctime == bsdtar->newer_ctime_sec &&
			    ARCHIVE_STAT_CTIME_NANOS(st) <=
			    bsdtar->newer_ctime_nsec)
				continue; /* Too old, skip it. */
		}
		if (bsdtar->newer_mtime_sec > 0) {
			if (st->st_mtime < bsdtar->newer_mtime_sec)
				continue; /* Too old, skip it. */
			if (st->st_mtime == bsdtar->newer_mtime_sec &&
			    ARCHIVE_STAT_MTIME_NANOS(st) <=
			    bsdtar->newer_mtime_nsec)
				continue; /* Too old, skip it. */
		}

		/*
		 * Note that pattern exclusions are checked before
		 * pathname rewrites are handled.  This gives more
		 * control over exclusions, since rewrites always lose
		 * information.  (For example, consider a rewrite
		 * s/foo[0-9]/foo/.  If we check exclusions after the
		 * rewrite, there would be no way to exclude foo1/bar
		 * while allowing foo2/bar.)
		 */
		if (lafe_excluded(bsdtar->matching, archive_entry_pathname(entry)))
			continue; /* Excluded by a pattern test. */

		if (mode == 't') {
			/* Perversely, gtar uses -O to mean "send to stderr"
			 * when used with -t. */
			out = bsdtar->option_stdout ? stderr : stdout;

			/*
			 * TODO: Provide some reasonable way to
			 * preview rewrites.  gtar always displays
			 * the unedited path in -t output, which means
			 * you cannot easily preview rewrites.
			 */
			if (bsdtar->verbose < 2)
				safe_fprintf(out, "%s",
				    archive_entry_pathname(entry));
			else
				list_item_verbose(bsdtar, out, entry);
			fflush(out);
			r = archive_read_data_skip(a);
			if (r == ARCHIVE_WARN) {
				fprintf(out, "\n");
				lafe_warnc(0, "%s", archive_error_string(a));
			}
			if (r == ARCHIVE_RETRY) {
				fprintf(out, "\n");
				lafe_warnc(0, "%s", archive_error_string(a));
			}
			if (r == ARCHIVE_FATAL) {
				fprintf(out, "\n");
				lafe_warnc(0, "%s", archive_error_string(a));
				bsdtar->return_value = 1;
				break;
			}
			fprintf(out, "\n");
		} else {
			/* Note: some rewrite failures prevent extraction. */
			if (edit_pathname(bsdtar, entry))
				continue; /* Excluded by a rewrite failure. */

			if (bsdtar->option_interactive &&
			    !yes("extract '%s'", archive_entry_pathname(entry)))
				continue;

			/*
			 * Format here is from SUSv2, including the
			 * deferred '\n'.
			 */
			if (bsdtar->verbose) {
				safe_fprintf(stderr, "x %s",
				    archive_entry_pathname(entry));
				fflush(stderr);
			}

			// TODO siginfo_printinfo(bsdtar, 0);

			if (bsdtar->option_stdout)
				r = archive_read_data_into_fd(a, 1);
			else
				r = archive_read_extract(a, entry, bsdtar->extract_flags);
			if (r != ARCHIVE_OK) {
				/* In non-verbose mode the name wasn't printed
				 * yet, so print it with the error. */
				if (!bsdtar->verbose)
					safe_fprintf(stderr, "%s",
					    archive_entry_pathname(entry));
				safe_fprintf(stderr, ": %s",
				    archive_error_string(a));
				if (!bsdtar->verbose)
					fprintf(stderr, "\n");
				bsdtar->return_value = 1;
			}
			if (bsdtar->verbose)
				fprintf(stderr, "\n");
			if (r == ARCHIVE_FATAL)
				break;
		}
	}

	r = archive_read_close(a);
	if (r != ARCHIVE_OK)
		lafe_warnc(0, "%s", archive_error_string(a));
	if (r <= ARCHIVE_WARN)
		bsdtar->return_value = 1;

	if (bsdtar->verbose > 2)
		fprintf(stdout, "Archive Format: %s, Compression: %s\n",
		    archive_format_name(a), archive_compression_name(a));

	archive_read_finish(a);
}
static int update_from_remote_repo(const char *name, const char *url) { struct archive *a = NULL; struct archive_entry *ae = NULL; char repofile[MAXPATHLEN]; char repofile_unchecked[MAXPATHLEN]; char tmp[21]; const char *dbdir = NULL; unsigned char *sig = NULL; int siglen = 0; int rc = EPKG_OK; (void)strlcpy(tmp, "/tmp/repo.txz.XXXXXX", sizeof(tmp)); if (mktemp(tmp) == NULL) { warnx("Could not create temporary file %s, aborting update.\n", tmp); return (EPKG_FATAL); } if (pkg_config_string(PKG_CONFIG_DBDIR, &dbdir) != EPKG_OK) { warnx("Cant get dbdir config entry"); return (EPKG_FATAL); } if (pkg_fetch_file(url, tmp) != EPKG_OK) { /* * No need to unlink(tmp) here as it is already * done in pkg_fetch_file() in case fetch failed. */ return (EPKG_FATAL); } a = archive_read_new(); archive_read_support_compression_all(a); archive_read_support_format_tar(a); archive_read_open_filename(a, tmp, 4096); while (archive_read_next_header(a, &ae) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(ae), "repo.sqlite") == 0) { snprintf(repofile, sizeof(repofile), "%s/%s.sqlite", dbdir, name); snprintf(repofile_unchecked, sizeof(repofile_unchecked), "%s.unchecked", repofile); archive_entry_set_pathname(ae, repofile_unchecked); archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS); } if (strcmp(archive_entry_pathname(ae), "signature") == 0) { siglen = archive_entry_size(ae); sig = malloc(siglen); archive_read_data(a, sig, siglen); } } if (sig != NULL) { if (pkg_repo_verify(repofile_unchecked, sig, siglen - 1) != EPKG_OK) { warnx("Invalid signature, removing repository.\n"); unlink(repofile_unchecked); free(sig); rc = EPKG_FATAL; goto cleanup; } } rename(repofile_unchecked, repofile); cleanup: if (a != NULL) archive_read_finish(a); (void)unlink(tmp); return (rc); }
/** Unpack a list of files in an archive.
 * @param handle the context handle
 * @param path the archive to unpack
 * @param prefix where to extract the files
 * @param list a list of files within the archive to unpack or NULL for all
 * @param breakfirst break after the first entry found
 * @return 0 on success, 1 on failure
 *
 * NOTE(review): extraction chdir()s into @prefix and restores the cwd
 * via a saved directory fd afterwards, so this mutates process-global
 * state while running.
 */
int _alpm_unpack(alpm_handle_t *handle, const char *path, const char *prefix,
		alpm_list_t *list, int breakfirst)
{
	int ret = 0;
	mode_t oldmask;
	struct archive *archive;
	struct archive_entry *entry;
	struct stat buf;
	int fd, cwdfd;

	fd = _alpm_open_archive(handle, path, &buf, &archive, ALPM_ERR_PKG_OPEN);
	if(fd < 0) {
		return 1;
	}

	/* restrict default permissions while extracting */
	oldmask = umask(0022);

	/* save the cwd so we can restore it later */
	OPEN(cwdfd, ".", O_RDONLY);
	if(cwdfd < 0) {
		_alpm_log(handle, ALPM_LOG_ERROR, _("could not get current working directory\n"));
	}

	/* just in case our cwd was removed in the upgrade operation */
	if(chdir(prefix) != 0) {
		_alpm_log(handle, ALPM_LOG_ERROR, _("could not change directory to %s (%s)\n"),
				prefix, strerror(errno));
		ret = 1;
		goto cleanup;
	}

	while(archive_read_next_header(archive, &entry) == ARCHIVE_OK) {
		const char *entryname;
		mode_t mode;

		entryname = archive_entry_pathname(entry);

		/* If specific files were requested, skip entries that don't match. */
		if(list) {
			/* match against the first path component only (keeps
			 * the text up to and including the first '/') */
			char *entry_prefix = strdup(entryname);
			char *p = strstr(entry_prefix,"/");
			if(p) {
				*(p+1) = '\0';
			}
			char *found = alpm_list_find_str(list, entry_prefix);
			free(entry_prefix);
			if(!found) {
				if(archive_read_data_skip(archive) != ARCHIVE_OK) {
					ret = 1;
					goto cleanup;
				}
				continue;
			} else {
				_alpm_log(handle, ALPM_LOG_DEBUG, "extracting: %s\n", entryname);
			}
		}

		/* normalize permissions: regular files 0644, dirs 0755 */
		mode = archive_entry_mode(entry);
		if(S_ISREG(mode)) {
			archive_entry_set_perm(entry, 0644);
		} else if(S_ISDIR(mode)) {
			archive_entry_set_perm(entry, 0755);
		}

		/* Extract the archive entry. */
		int readret = archive_read_extract(archive, entry, 0);
		if(readret == ARCHIVE_WARN) {
			/* operation succeeded but a non-critical error was encountered */
			_alpm_log(handle, ALPM_LOG_WARNING, _("warning given when extracting %s (%s)\n"),
					entryname, archive_error_string(archive));
		} else if(readret != ARCHIVE_OK) {
			_alpm_log(handle, ALPM_LOG_ERROR, _("could not extract %s (%s)\n"),
					entryname, archive_error_string(archive));
			ret = 1;
			goto cleanup;
		}

		if(breakfirst) {
			break;
		}
	}

cleanup:
	umask(oldmask);
	archive_read_finish(archive);
	CLOSE(fd);
	if(cwdfd >= 0) {
		if(fchdir(cwdfd) != 0) {
			_alpm_log(handle, ALPM_LOG_ERROR,
					_("could not restore working directory (%s)\n"), strerror(errno));
		}
		CLOSE(cwdfd);
	}

	return ret;
}
/* * Handle read modes: 'x', 't' and 'p'. */ void ar_read_archive(struct bsdar *bsdar, int mode) { FILE *out; struct archive *a; struct archive_entry *entry; struct stat sb; struct tm *tp; const char *bname; const char *name; mode_t md; size_t size; time_t mtime; uid_t uid; gid_t gid; char **av; char buf[25]; char find; int i, flags, r; assert(mode == 'p' || mode == 't' || mode == 'x'); if ((a = archive_read_new()) == NULL) bsdar_errc(bsdar, 0, "archive_read_new failed"); archive_read_support_compression_none(a); archive_read_support_format_ar(a); AC(archive_read_open_file(a, bsdar->filename, DEF_BLKSZ)); out = bsdar->output; for (;;) { r = archive_read_next_header(a, &entry); if (r == ARCHIVE_WARN || r == ARCHIVE_RETRY || r == ARCHIVE_FATAL) bsdar_warnc(bsdar, 0, "%s", archive_error_string(a)); if (r == ARCHIVE_EOF || r == ARCHIVE_FATAL) break; if (r == ARCHIVE_RETRY) { bsdar_warnc(bsdar, 0, "Retrying..."); continue; } if (archive_format(a) == ARCHIVE_FORMAT_AR_BSD) bsdar->options |= AR_BSD; else bsdar->options &= ~AR_BSD; name = archive_entry_pathname(entry); /* Skip pseudo members. 
*/ if (bsdar_is_pseudomember(bsdar, name)) continue; if (bsdar->argc > 0) { find = 0; for(i = 0; i < bsdar->argc; i++) { av = &bsdar->argv[i]; if (*av == NULL) continue; if ((bname = basename(*av)) == NULL) bsdar_errc(bsdar, errno, "basename failed"); if (strcmp(bname, name) != 0) continue; *av = NULL; find = 1; break; } if (!find) continue; } if (mode == 't') { if (bsdar->options & AR_V) { md = archive_entry_mode(entry); uid = archive_entry_uid(entry); gid = archive_entry_gid(entry); size = archive_entry_size(entry); mtime = archive_entry_mtime(entry); (void)fprintf(out, "%s %6d/%-6d %8ju ", bsdar_strmode(md) + 1, uid, gid, (uintmax_t)size); tp = localtime(&mtime); (void)strftime(buf, sizeof(buf), "%b %e %H:%M %Y", tp); (void)fprintf(out, "%s %s", buf, name); } else (void)fprintf(out, "%s", name); r = archive_read_data_skip(a); if (r == ARCHIVE_WARN || r == ARCHIVE_RETRY || r == ARCHIVE_FATAL) { (void)fprintf(out, "\n"); bsdar_warnc(bsdar, 0, "%s", archive_error_string(a)); } if (r == ARCHIVE_FATAL) break; (void)fprintf(out, "\n"); } else { /* mode == 'x' || mode = 'p' */ if (mode == 'p') { if (bsdar->options & AR_V) { (void)fprintf(out, "\n<%s>\n\n", name); fflush(out); } r = archive_read_data_into_fd(a, fileno(out)); } else { /* mode == 'x' */ if (stat(name, &sb) != 0) { if (errno != ENOENT) { bsdar_warnc(bsdar, 0, "stat %s failed", bsdar->filename); continue; } } else { /* stat success, file exist */ if (bsdar->options & AR_CC) continue; if (bsdar->options & AR_U && archive_entry_mtime(entry) <= sb.st_mtime) continue; } if (bsdar->options & AR_V) (void)fprintf(out, "x - %s\n", name); flags = 0; if (bsdar->options & AR_O) flags |= ARCHIVE_EXTRACT_TIME; r = archive_read_extract(a, entry, flags); } if (r) bsdar_warnc(bsdar, 0, "%s", archive_error_string(a)); } } AC(archive_read_close(a)); ACV(archive_read_finish(a)); }
int pkg_update(const char *name, const char *packagesite, bool force) { char url[MAXPATHLEN]; struct archive *a = NULL; struct archive_entry *ae = NULL; char repofile[MAXPATHLEN]; char repofile_unchecked[MAXPATHLEN]; char tmp[MAXPATHLEN]; const char *dbdir = NULL; const char *repokey; unsigned char *sig = NULL; int siglen = 0; int fd, rc = EPKG_FATAL, ret; struct stat st; time_t t = 0; sqlite3 *sqlite; char *archreq = NULL; const char *myarch; int64_t res; const char *tmpdir; snprintf(url, MAXPATHLEN, "%s/repo.txz", packagesite); tmpdir = getenv("TMPDIR"); if (tmpdir == NULL) tmpdir = "/tmp"; strlcpy(tmp, tmpdir, sizeof(tmp)); strlcat(tmp, "/repo.txz.XXXXXX", sizeof(tmp)); fd = mkstemp(tmp); if (fd == -1) { pkg_emit_error("Could not create temporary file %s, " "aborting update.\n", tmp); return (EPKG_FATAL); } if (pkg_config_string(PKG_CONFIG_DBDIR, &dbdir) != EPKG_OK) { pkg_emit_error("Cant get dbdir config entry"); return (EPKG_FATAL); } snprintf(repofile, sizeof(repofile), "%s/%s.sqlite", dbdir, name); if (force) t = 0; /* Always fetch */ else { if (stat(repofile, &st) != -1) { t = st.st_mtime; /* add 1 minute to the timestamp because * repo.sqlite is always newer than repo.txz, * 1 minute should be enough. 
*/ t += 60; } } rc = pkg_fetch_file_to_fd(url, fd, t); close(fd); if (rc != EPKG_OK) { goto cleanup; } if (eaccess(repofile, W_OK) == -1) { pkg_emit_error("Insufficient privilege to update %s\n", repofile); rc = EPKG_ENOACCESS; goto cleanup; } a = archive_read_new(); archive_read_support_compression_all(a); archive_read_support_format_tar(a); archive_read_open_filename(a, tmp, 4096); while (archive_read_next_header(a, &ae) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(ae), "repo.sqlite") == 0) { snprintf(repofile_unchecked, sizeof(repofile_unchecked), "%s.unchecked", repofile); archive_entry_set_pathname(ae, repofile_unchecked); /* * The repo should be owned by root and not writable */ archive_entry_set_uid(ae, 0); archive_entry_set_gid(ae, 0); archive_entry_set_perm(ae, 0644); archive_read_extract(a, ae, EXTRACT_ARCHIVE_FLAGS); } if (strcmp(archive_entry_pathname(ae), "signature") == 0) { siglen = archive_entry_size(ae); sig = malloc(siglen); archive_read_data(a, sig, siglen); } } if (pkg_config_string(PKG_CONFIG_REPOKEY, &repokey) != EPKG_OK) { free(sig); return (EPKG_FATAL); } if (repokey != NULL) { if (sig != NULL) { ret = rsa_verify(repofile_unchecked, repokey, sig, siglen - 1); if (ret != EPKG_OK) { pkg_emit_error("Invalid signature, " "removing repository.\n"); unlink(repofile_unchecked); free(sig); rc = EPKG_FATAL; goto cleanup; } free(sig); } else { pkg_emit_error("No signature found in the repository. 
" "Can not validate against %s key.", repokey); rc = EPKG_FATAL; unlink(repofile_unchecked); goto cleanup; } } /* check is the repository is for valid architecture */ sqlite3_initialize(); if (sqlite3_open(repofile_unchecked, &sqlite) != SQLITE_OK) { unlink(repofile_unchecked); pkg_emit_error("Corrupted repository"); rc = EPKG_FATAL; goto cleanup; } pkg_config_string(PKG_CONFIG_ABI, &myarch); archreq = sqlite3_mprintf("select count(arch) from packages " "where arch not GLOB '%q'", myarch); if (get_pragma(sqlite, archreq, &res) != EPKG_OK) { sqlite3_free(archreq); pkg_emit_error("Unable to query repository"); rc = EPKG_FATAL; sqlite3_close(sqlite); goto cleanup; } if (res > 0) { pkg_emit_error("At least one of the packages provided by" "the repository is not compatible with your abi: %s", myarch); rc = EPKG_FATAL; sqlite3_close(sqlite); goto cleanup; } sqlite3_close(sqlite); sqlite3_shutdown(); if (rename(repofile_unchecked, repofile) != 0) { pkg_emit_errno("rename", ""); rc = EPKG_FATAL; goto cleanup; } if ((rc = remote_add_indexes(name)) != EPKG_OK) goto cleanup; rc = EPKG_OK; cleanup: if (a != NULL) archive_read_finish(a); (void)unlink(tmp); return (rc); }
/*
 * Unpack the binary package archive `ar' (for package `pkgver',
 * originating file `fname') into xhp->rootdir.
 *
 * The first entries of an XBPS binary package are its metadata
 * (./INSTALL, ./REMOVE, ./files.plist, ./props.plist); regular files
 * follow.  The INSTALL "pre" action runs before files are unpacked.
 * Existing on-disk files may be kept (hash match, unmodified symlink,
 * configuration-file policy) in which case only mode/uid/gid are
 * re-synced.  After unpacking, obsolete files from a previous version
 * are removed unless "preserve"/"skip-obsoletes" apply.
 *
 * Returns 0 on success or an errno-style code on failure.
 */
static int
unpack_archive(struct xbps_handle *xhp, xbps_dictionary_t pkg_repod,
    const char *pkgver, const char *fname, struct archive *ar)
{
	xbps_dictionary_t propsd, filesd, old_filesd;
	xbps_array_t array, obsoletes;
	xbps_object_t obj;
	void *instbuf = NULL, *rembuf = NULL;
	struct stat st;
	struct xbps_unpack_cb_data xucd;
	struct archive_entry *entry;
	size_t i, entry_idx = 0, instbufsiz = 0, rembufsiz = 0;
	ssize_t entry_size;
	const char *file, *entry_pname, *transact, *tgtlnk;
	char *pkgname, *dname, *buf, *buf2, *p, *p2;
	int ar_rv, rv, entry_type, flags;
	bool preserve, update, conf_file, file_exists, skip_obsoletes;
	bool softreplace, skip_extract, force, metafile;
	uid_t euid;

	assert(xbps_object_type(pkg_repod) == XBPS_TYPE_DICTIONARY);
	assert(ar != NULL);

	propsd = filesd = old_filesd = NULL;
	force = preserve = update = conf_file = file_exists = false;
	skip_obsoletes = softreplace = metafile = false;

	xbps_dictionary_get_bool(pkg_repod, "preserve", &preserve);
	xbps_dictionary_get_bool(pkg_repod, "skip-obsoletes", &skip_obsoletes);
	xbps_dictionary_get_bool(pkg_repod, "softreplace", &softreplace);
	xbps_dictionary_get_cstring_nocopy(pkg_repod, "transaction", &transact);

	euid = geteuid();

	pkgname = xbps_pkg_name(pkgver);
	assert(pkgname);

	if (xhp->flags & XBPS_FLAG_FORCE_UNPACK)
		force = true;

	if (xhp->unpack_cb != NULL) {
		/* initialize data for unpack cb */
		memset(&xucd, 0, sizeof(xucd));
	}
	/* Make sure the root directory exists and is accessible. */
	if (access(xhp->rootdir, R_OK) == -1) {
		if (errno != ENOENT) {
			rv = errno;
			goto out;
		}
		if (xbps_mkpath(xhp->rootdir, 0750) == -1) {
			rv = errno;
			goto out;
		}
	}
	if (chdir(xhp->rootdir) == -1) {
		xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
		    errno, pkgver,
		    "%s: [unpack] failed to chdir to rootdir `%s': %s",
		    pkgver, xhp->rootdir, strerror(errno));
		rv = errno;
		goto out;
	}
	if (strcmp(transact, "update") == 0)
		update = true;
	/*
	 * Process the archive files.
	 */
	flags = set_extract_flags(euid);
	for (;;) {
		ar_rv = archive_read_next_header(ar, &entry);
		if (ar_rv == ARCHIVE_EOF || ar_rv == ARCHIVE_FATAL)
			break;
		else if (ar_rv == ARCHIVE_RETRY)
			continue;

		entry_pname = archive_entry_pathname(entry);
		entry_size = archive_entry_size(entry);
		entry_type = archive_entry_filetype(entry);
		/*
		 * Ignore directories from archive.
		 */
		if (entry_type == AE_IFDIR) {
			archive_read_data_skip(ar);
			continue;
		}
		if (strcmp("./INSTALL", entry_pname) == 0) {
			/*
			 * Store file in a buffer and execute
			 * the "pre" action from it.
			 *
			 * NOTE(review): malloc() is only checked with
			 * assert() here and for ./REMOVE below.
			 */
			instbufsiz = entry_size;
			instbuf = malloc(entry_size);
			assert(instbuf);
			if (archive_read_data(ar, instbuf, entry_size) !=
			    entry_size) {
				rv = EINVAL;
				goto out;
			}
			rv = xbps_pkg_exec_buffer(xhp, instbuf, instbufsiz,
			    pkgver, "pre", update);
			if (rv != 0) {
				xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
				    rv, pkgver,
				    "%s: [unpack] INSTALL script failed "
				    "to execute pre ACTION: %s",
				    pkgver, strerror(rv));
				goto out;
			}
			continue;

		} else if (strcmp("./REMOVE", entry_pname) == 0) {
			/* store file in a buffer */
			rembufsiz = entry_size;
			rembuf = malloc(entry_size);
			assert(rembuf);
			if (archive_read_data(ar, rembuf, entry_size) !=
			    entry_size) {
				rv = EINVAL;
				goto out;
			}
			continue;

		} else if (strcmp("./files.plist", entry_pname) == 0) {
			filesd = xbps_archive_get_dictionary(ar, entry);
			if (filesd == NULL) {
				rv = errno;
				goto out;
			}
			continue;
		} else if (strcmp("./props.plist", entry_pname) == 0) {
			propsd = xbps_archive_get_dictionary(ar, entry);
			if (propsd == NULL) {
				rv = errno;
				goto out;
			}
			continue;
		}
		/*
		 * XXX: duplicate code.
		 * Create the metaplist file before unpacking any real file.
		 */
		if (propsd && filesd && !metafile) {
			rv = create_pkg_metaplist(xhp, pkgname, pkgver,
			    propsd, filesd, instbuf, instbufsiz,
			    rembuf, rembufsiz);
			if (rv != 0) {
				xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
				    rv, pkgver,
				    "%s: [unpack] failed to create metaplist file: %s",
				    pkgver, strerror(rv));
				goto out;
			}
			metafile = true;
		}
		/*
		 * If XBPS_PKGFILES or XBPS_PKGPROPS weren't found
		 * in the archive at this phase, skip all data.
		 */
		if (propsd == NULL || filesd == NULL) {
			archive_read_data_skip(ar);
			/*
			 * If we have processed 4 entries and the two
			 * required metadata files weren't found, bail out.
			 * This is not an XBPS binary package.
			 */
			if (entry_idx >= 3) {
				xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
				    ENODEV, pkgver,
				    "%s: [unpack] invalid binary package `%s'.",
				    pkgver, fname);
				rv = ENODEV;
				goto out;
			}
			entry_idx++;
			continue;
		}
		/*
		 * Prepare unpack callback ops.
		 */
		if (xhp->unpack_cb != NULL) {
			xucd.xhp = xhp;
			xucd.pkgver = pkgver;
			xucd.entry = entry_pname;
			xucd.entry_size = entry_size;
			xucd.entry_is_conf = false;
		}
		/*
		 * Compute total entries in progress data, if set.
		 * total_entries = files + conf_files + links.
		 */
		if (xhp->unpack_cb != NULL) {
			xucd.entry_total_count = 0;
			array = xbps_dictionary_get(filesd, "files");
			xucd.entry_total_count +=
			    (ssize_t)xbps_array_count(array);
			array = xbps_dictionary_get(filesd, "conf_files");
			xucd.entry_total_count +=
			    (ssize_t)xbps_array_count(array);
			array = xbps_dictionary_get(filesd, "links");
			xucd.entry_total_count +=
			    (ssize_t)xbps_array_count(array);
		}
		/*
		 * Always check that extracted file exists and hash
		 * doesn't match, in that case overwrite the file.
		 * Otherwise skip extracting it.
		 */
		conf_file = skip_extract = file_exists = false;
		if (lstat(entry_pname, &st) == 0)
			file_exists = true;

		if (!force && (entry_type == AE_IFREG)) {
			/*
			 * Strip the leading "./" -- presumably every entry
			 * starts with "./".
			 * NOTE(review): if entry_pname contains no '.',
			 * strchr() returns NULL and NULL+1 is UB; the
			 * assert below cannot catch that case.
			 */
			buf = strchr(entry_pname, '.') + 1;
			assert(buf != NULL);
			if (file_exists) {
				/*
				 * Handle configuration files. Check if current
				 * entry is a configuration file and take action
				 * if required. Skip packages that don't have
				 * "conf_files" array on its XBPS_PKGPROPS
				 * dictionary.
				 */
				if (xbps_entry_is_a_conf_file(propsd, buf)) {
					conf_file = true;
					if (xhp->unpack_cb != NULL)
						xucd.entry_is_conf = true;

					rv = xbps_entry_install_conf_file(xhp,
					    filesd, entry, entry_pname, pkgver,
					    pkgname);
					if (rv == -1) {
						/* error */
						goto out;
					} else if (rv == 0) {
						/*
						 * Keep curfile as is.
						 */
						skip_extract = true;
					}
				} else {
					rv = xbps_file_hash_check_dictionary(
					    xhp, filesd, "files", buf);
					if (rv == -1) {
						/* error */
						xbps_dbg_printf(xhp,
						    "%s: failed to check"
						    " hash for `%s': %s\n",
						    pkgver, entry_pname,
						    strerror(errno));
						goto out;
					} else if (rv == 0) {
						/*
						 * hash match, skip extraction.
						 */
						xbps_dbg_printf(xhp,
						    "%s: file %s "
						    "matches existing SHA256, "
						    "skipping...\n",
						    pkgver, entry_pname);
						skip_extract = true;
					}
				}
			}
		} else if (!force && (entry_type == AE_IFLNK)) {
			/*
			 * Check if current link from binpkg hasn't been
			 * modified, otherwise extract new link.
			 */
			buf = realpath(entry_pname, NULL);
			if (buf) {
				/* strip rootdir prefix from resolved path */
				if (strcmp(xhp->rootdir, "/")) {
					p = buf;
					p += strlen(xhp->rootdir);
				} else
					p = buf;
				tgtlnk = find_pkg_symlink_target(filesd,
				    entry_pname);
				assert(tgtlnk);
				if (strncmp(tgtlnk, "./", 2) == 0) {
					/* relative target: resolve against
					 * the entry's directory */
					buf2 = strdup(entry_pname);
					assert(buf2);
					dname = dirname(buf2);
					p2 = xbps_xasprintf("%s/%s", dname, tgtlnk);
					free(buf2);
				} else {
					p2 = strdup(tgtlnk);
					assert(p2);
				}
				xbps_dbg_printf(xhp, "%s: symlink %s cur: %s "
				    "new: %s\n", pkgver, entry_pname, p, p2);

				if (strcmp(p, p2) == 0) {
					xbps_dbg_printf(xhp, "%s: symlink "
					    "%s matched, skipping...\n",
					    pkgver, entry_pname);
					skip_extract = true;
				}
				free(buf);
				free(p2);
			}
		}
		/*
		 * Check if current file mode differs from file mode
		 * in binpkg and apply perms if true.
		 */
		if (!force && file_exists && skip_extract &&
		    (archive_entry_mode(entry) != st.st_mode)) {
			if (chmod(entry_pname,
			    archive_entry_mode(entry)) != 0) {
				xbps_dbg_printf(xhp,
				    "%s: failed "
				    "to set perms %s to %s: %s\n",
				    pkgver, archive_entry_strmode(entry),
				    entry_pname,
				    strerror(errno));
				rv = EINVAL;
				goto out;
			}
			xbps_dbg_printf(xhp, "%s: entry %s changed file "
			    "mode to %s.\n", pkgver, entry_pname,
			    archive_entry_strmode(entry));
		}
		/*
		 * Check if current uid/gid differs from file in binpkg,
		 * and change permissions if true.
		 */
		if ((!force && file_exists && skip_extract && (euid == 0)) &&
		    (((archive_entry_uid(entry) != st.st_uid)) ||
		    ((archive_entry_gid(entry) != st.st_gid)))) {
			if (lchown(entry_pname, archive_entry_uid(entry),
			    archive_entry_gid(entry)) != 0) {
				xbps_dbg_printf(xhp,
				    "%s: failed "
				    "to set uid/gid to %u:%u (%s)\n",
				    pkgver, archive_entry_uid(entry),
				    archive_entry_gid(entry),
				    strerror(errno));
			} else {
				xbps_dbg_printf(xhp, "%s: entry %s changed "
				    "uid/gid to %u:%u.\n", pkgver, entry_pname,
				    archive_entry_uid(entry),
				    archive_entry_gid(entry));
			}
		}

		if (!update && conf_file && file_exists && !skip_extract) {
			/*
			 * If installing new package preserve old configuration
			 * file but renaming it to <file>.old.
			 */
			buf = xbps_xasprintf("%s.old", entry_pname);
			(void)rename(entry_pname, buf);
			free(buf);
			buf = NULL;
			xbps_set_cb_state(xhp,
			    XBPS_STATE_CONFIG_FILE, 0, pkgver,
			    "Renamed old configuration file "
			    "`%s' to `%s.old'.", entry_pname, entry_pname);
		}

		if (!force && skip_extract) {
			archive_read_data_skip(ar);
			continue;
		}
		/*
		 * Reset entry_pname again because if entry's pathname
		 * has been changed it will become a dangling pointer.
		 */
		entry_pname = archive_entry_pathname(entry);
		/*
		 * Extract entry from archive.
		 */
		if (archive_read_extract(ar, entry, flags) != 0) {
			rv = archive_errno(ar);
			xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
			    rv, pkgver,
			    "%s: [unpack] failed to extract file `%s': %s",
			    pkgver, entry_pname, strerror(rv));
		} else {
			if (xhp->unpack_cb != NULL) {
				xucd.entry_extract_count++;
				(*xhp->unpack_cb)(&xucd, xhp->unpack_cb_data);
			}
		}
	}
	/*
	 * XXX: duplicate code.
	 * Create the metaplist file if it wasn't created before.
	 */
	if (propsd && filesd && !metafile) {
		rv = create_pkg_metaplist(xhp, pkgname, pkgver,
		    propsd, filesd, instbuf, instbufsiz,
		    rembuf, rembufsiz);
		if (rv != 0) {
			xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
			    rv, pkgver,
			    "%s: [unpack] failed to create metaplist file: %s",
			    pkgver, strerror(rv));
			goto out;
		}
	}
	/*
	 * If there was any error extracting files from archive, error out.
	 *
	 * NOTE(review): this call passes an extra NULL before the format
	 * string and three varargs for two %s specifiers -- looks
	 * inconsistent with the other xbps_set_cb_state() calls; verify
	 * against the prototype.
	 */
	if ((rv = archive_errno(ar)) != 0) {
		xbps_set_cb_state(xhp, XBPS_STATE_UNPACK_FAIL,
		    rv, pkgver, NULL,
		    "%s: [unpack] failed to extract files: %s",
		    pkgver, fname, archive_error_string(ar));
		goto out;
	}
	/*
	 * Skip checking for obsolete files on:
	 * - New package installation without "softreplace" keyword.
	 * - Package with "preserve" keyword.
	 * - Package with "skip-obsoletes" keyword.
	 */
	if (skip_obsoletes || preserve || (!softreplace && !update))
		goto out;
	/*
	 * Check and remove obsolete files on:
	 * - Package upgrade.
	 * - Package with "softreplace" keyword.
	 */
	old_filesd = xbps_pkgdb_get_pkg_metadata(xhp, pkgname);
	if (old_filesd == NULL)
		goto out;

	obsoletes = xbps_find_pkg_obsoletes(xhp, old_filesd, filesd);
	for (i = 0; i < xbps_array_count(obsoletes); i++) {
		obj = xbps_array_get(obsoletes, i);
		file = xbps_string_cstring_nocopy(obj);
		if (remove(file) == -1) {
			xbps_set_cb_state(xhp,
			    XBPS_STATE_REMOVE_FILE_OBSOLETE_FAIL,
			    errno, pkgver,
			    "%s: failed to remove obsolete entry `%s': %s",
			    pkgver, file, strerror(errno));
			continue;
		}
		xbps_set_cb_state(xhp,
		    XBPS_STATE_REMOVE_FILE_OBSOLETE,
		    0, pkgver, "%s: removed obsolete entry: %s", pkgver, file);
		/*
		 * NOTE(review): xbps_array_get() usually returns a borrowed
		 * reference -- confirm releasing obj here (and old_filesd
		 * below) is correct.
		 */
		xbps_object_release(obj);
	}
	xbps_object_release(old_filesd);

out:
	if (xbps_object_type(filesd) == XBPS_TYPE_DICTIONARY)
		xbps_object_release(filesd);
	if (xbps_object_type(propsd) == XBPS_TYPE_DICTIONARY)
		xbps_object_release(propsd);
	if (pkgname != NULL)
		free(pkgname);
	if (instbuf != NULL)
		free(instbuf);
	if (rembuf != NULL)
		free(rembuf);

	return rv;
}
/** * asb_utils_explode: * @filename: package filename * @dir: directory to decompress into * @glob: (element-type utf8): filename globs, or %NULL * @error: A #GError or %NULL * * Decompresses the package into a given directory. * * Returns: %TRUE for success, %FALSE otherwise * * Since: 0.1.0 **/ gboolean asb_utils_explode (const gchar *filename, const gchar *dir, GPtrArray *glob, GError **error) { const gchar *tmp; gboolean ret = TRUE; gboolean valid; int r; struct archive *arch = NULL; struct archive *arch_preview = NULL; struct archive_entry *entry; g_autoptr(GHashTable) matches = NULL; /* populate a hash with all the files, symlinks and hardlinks that * actually need decompressing */ arch_preview = archive_read_new (); archive_read_support_format_all (arch_preview); archive_read_support_filter_all (arch_preview); r = archive_read_open_filename (arch_preview, filename, 1024 * 32); if (r) { ret = FALSE; g_set_error (error, ASB_PLUGIN_ERROR, ASB_PLUGIN_ERROR_FAILED, "Cannot open: %s", archive_error_string (arch_preview)); goto out; } matches = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); for (;;) { g_autofree gchar *path = NULL; r = archive_read_next_header (arch_preview, &entry); if (r == ARCHIVE_EOF) break; if (r != ARCHIVE_OK) { ret = FALSE; g_set_error (error, ASB_PLUGIN_ERROR, ASB_PLUGIN_ERROR_FAILED, "Cannot read header: %s", archive_error_string (arch_preview)); goto out; } /* get the destination filename */ tmp = archive_entry_pathname (entry); if (tmp == NULL) continue; path = asb_utils_sanitise_path (tmp); if (glob != NULL) { if (asb_glob_value_search (glob, path) == NULL) continue; } g_hash_table_insert (matches, g_strdup (path), GINT_TO_POINTER (1)); /* add hardlink */ tmp = archive_entry_hardlink (entry); if (tmp != NULL) { g_hash_table_insert (matches, asb_utils_sanitise_path (tmp), GINT_TO_POINTER (1)); } /* add symlink */ tmp = archive_entry_symlink (entry); if (tmp != NULL) { if (g_path_is_absolute (tmp)) { g_hash_table_insert 
(matches, asb_utils_sanitise_path (tmp), GINT_TO_POINTER (1)); } else { g_autofree gchar *parent_dir = g_path_get_dirname (path); g_hash_table_insert (matches, asb_utils_resolve_relative_symlink (parent_dir, tmp), GINT_TO_POINTER (1)); } } } /* decompress anything matching either glob */ arch = archive_read_new (); archive_read_support_format_all (arch); archive_read_support_filter_all (arch); r = archive_read_open_filename (arch, filename, 1024 * 32); if (r) { ret = FALSE; g_set_error (error, ASB_PLUGIN_ERROR, ASB_PLUGIN_ERROR_FAILED, "Cannot open: %s", archive_error_string (arch)); goto out; } for (;;) { g_autofree gchar *path = NULL; r = archive_read_next_header (arch, &entry); if (r == ARCHIVE_EOF) break; if (r != ARCHIVE_OK) { ret = FALSE; g_set_error (error, ASB_PLUGIN_ERROR, ASB_PLUGIN_ERROR_FAILED, "Cannot read header: %s", archive_error_string (arch)); goto out; } /* only extract if valid */ tmp = archive_entry_pathname (entry); path = asb_utils_sanitise_path (tmp); if (g_hash_table_lookup (matches, path) == NULL) continue; valid = asb_utils_explode_file (entry, dir); if (!valid) continue; r = archive_read_extract (arch, entry, 0); if (r != ARCHIVE_OK) { ret = FALSE; g_set_error (error, ASB_PLUGIN_ERROR, ASB_PLUGIN_ERROR_FAILED, "Cannot extract: %s", archive_error_string (arch)); goto out; } } out: if (arch_preview != NULL) { archive_read_close (arch_preview); archive_read_free (arch_preview); } if (arch != NULL) { archive_read_close (arch); archive_read_free (arch); } return ret; }
/*
 * Extract the next entry of the distribution archive for @file, opening
 * the (global) archive reader on first call.  Returns the percentage of
 * the file processed so far, 100 when the archive is exhausted, or -1 on
 * error (or when no percentage can be computed).  Sets dpv_abort and
 * file->status on failure.  NOTE(review): relies on the file-scope
 * `archive' pointer and the `distdir'/`dpv_overall_read' globals — one
 * archive may be in flight at a time.
 */
static int
extract_files(struct dpv_file_node *file, int out __unused)
{
	int retval;
	struct archive_entry *entry;
	const char *error_str;
	char path[PATH_MAX];
	char errormsg[PATH_MAX + 512];

	/* Open the archive if necessary */
	if (archive == NULL) {
		if ((archive = archive_read_new()) == NULL) {
			/* archive_read_new() only fails on allocation; there
			 * is no archive object to query for an error string
			 * (archive_error_string(NULL) would dereference NULL) */
			snprintf(errormsg, sizeof(errormsg),
			    "Error: failed to allocate archive reader\n");
			dialog_msgbox("Extract Error", errormsg, 0, 0, TRUE);
			dpv_abort = 1;
			return (-1);
		}
		archive_read_support_format_all(archive);
		archive_read_support_filter_all(archive);
		snprintf(path, sizeof(path), "%s/%s", distdir, file->path);
		retval = archive_read_open_filename(archive, path, 4096);
		if (retval != 0) {
			snprintf(errormsg, sizeof(errormsg),
			    "Error opening %s: %s\n", file->name,
			    archive_error_string(archive));
			dialog_msgbox("Extract Error", errormsg, 0, 0, TRUE);
			/* free the failed reader and reset the global so a
			 * later call does not reuse a dead handle */
			archive_read_free(archive);
			archive = NULL;
			file->status = DPV_STATUS_FAILED;
			dpv_abort = 1;
			return (-1);
		}
	}

	/* Read the next archive header */
	retval = archive_read_next_header(archive, &entry);

	/* If that went well, perform the extraction */
	if (retval == ARCHIVE_OK)
		retval = archive_read_extract(archive, entry,
		    ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_OWNER |
		    ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_ACL |
		    ARCHIVE_EXTRACT_XATTR | ARCHIVE_EXTRACT_FFLAGS);

	/* Test for either EOF or error */
	if (retval == ARCHIVE_EOF) {
		archive_read_free(archive);
		archive = NULL;
		file->status = DPV_STATUS_DONE;
		return (100);
	}
	/* archive_error_string() may return NULL; guard before strcmp() */
	error_str = archive_error_string(archive);
	if (retval != ARCHIVE_OK &&
	    !(retval == ARCHIVE_WARN && error_str != NULL &&
	    strcmp(error_str, "Can't restore time") == 0)) {
		/*
		 * Print any warning/error messages except inability to set
		 * ctime/mtime, which is not fatal, or even interesting,
		 * for our purposes. Would be nice if this were a libarchive
		 * option.
		 */
		snprintf(errormsg, sizeof(errormsg),
		    "Error while extracting %s: %s\n", file->name,
		    error_str != NULL ? error_str : "unknown error");
		dialog_msgbox("Extract Error", errormsg, 0, 0, TRUE);
		file->status = DPV_STATUS_FAILED;
		dpv_abort = 1;
		return (-1);
	}

	dpv_overall_read++;
	file->read++;

	/* Calculate [overall] percentage of completion (if possible);
	 * length must be strictly positive to avoid dividing by zero */
	if (file->length > 0)
		return (file->read * 100 / file->length);
	else
		return (-1);
}