bool CgCompressCreator::create( const QString& path, int, int, QImage& img ){ qCDebug(LOG) << "Trying to read image" << path; QFile file(path); if( !file.open( QIODevice::ReadOnly ) ) return false; archive* a = archive_read_new(); archive_read_support_format_zip(a); ReadingData data( file ); if( archive_read_open( a, &data, nullptr, stream_read, stream_close ) ) qCWarning(LOG) << "couldn't open:" << archive_error_string(a); else{ QString name; while( !(name = next_file( a )).isNull() ){ if( name.startsWith( "Thumbnails/thumbnail." ) || name.startsWith("thumb.") ){ qCDebug(LOG) << "Found thumbnail!"; QString suffix = QFileInfo(name).suffix(); img = read_image( a, suffix.toLocal8Bit().constData() ); break; } } } archive_read_close( a ); archive_read_free( a ); if( img.isNull() ) qCWarning(LOG) << "No thumbnail found!"; return !img.isNull(); }
/**
 * Extract a tar stream read from @input into @extract_folder.
 *
 * Each regular entry is written to <extract_folder>/<entry path>, creating
 * parent directories as needed. Throws ibrcommon::IOException when a target
 * file cannot be opened for writing.
 *
 * Fixes vs. previous revision:
 *  - the libarchive handle is now released before throwing (was leaked),
 *  - the exception message names the file that failed to open, not its
 *    parent directory.
 */
void TarUtils::read( const ibrcommon::File &extract_folder, std::istream &input )
{
	struct archive *a;
	struct archive_entry *entry;
	int ret, fd;

	a = archive_read_new();
	archive_read_support_filter_all(a);
	archive_read_support_compression_all(a); /* legacy alias, kept for older libarchive */
	archive_read_support_format_tar(a);
	archive_read_open(a, (void*) &input, &__tar_utils_open_callback, &__tar_utils_read_callback, &__tar_utils_close_callback);

	while ((ret = archive_read_next_header(a, &entry)) == ARCHIVE_OK )
	{
		ibrcommon::File filename = extract_folder.get(archive_entry_pathname(entry));
		ibrcommon::File path = filename.getParent();
		ibrcommon::File::createDirectory(path);

		fd = open(filename.getPath().c_str(), O_CREAT|O_WRONLY, 0600);
		if (fd < 0)
		{
			/* release libarchive resources before propagating the error */
			archive_read_free(a);
			throw ibrcommon::IOException("cannot open file " + filename.getPath());
		}
		archive_read_data_into_fd(a, fd);
		close(fd);
	}

	/* archive_read_free() also closes the reader */
	archive_read_free(a);
}
/*
 * Start an asynchronous HTTP fetch of an archive from @url, extracting it
 * under @base_path. Per-entry progress is reported through
 * @archive_entry_callback and completion through @finish_callback (both with
 * @user_data). Errors are stored in fetch_data->error and delivered by
 * scheduling archive_finish_callback on the main loop; this function itself
 * returns no status.
 *
 * NOTE(review): on the early-error returns, fetch_data (and the handles
 * created so far) are presumably released by archive_finish_callback —
 * confirm, since nothing is freed here.
 */
void restraint_fetch_http (SoupURI *url, const gchar *base_path, ArchiveEntryCallback archive_entry_callback, FetchFinishCallback finish_callback, gpointer user_data)
{
    g_return_if_fail(url != NULL);
    g_return_if_fail(base_path != NULL);

    /* Zero-initialized per-fetch state; carries callbacks and handles. */
    FetchData *fetch_data = g_slice_new0(FetchData);
    fetch_data->archive_entry_callback = archive_entry_callback;
    fetch_data->finish_callback = finish_callback;
    fetch_data->user_data = user_data;
    fetch_data->url = url;
    fetch_data->base_path = base_path;

    GError *tmp_error = NULL;
    gint r;

    /* NOTE(review): 'session' is a file-scope global — a concurrent fetch
     * would overwrite it; confirm single-fetch usage. */
    session = soup_session_new();

    /* Reader for the downloaded archive stream. */
    fetch_data->a = archive_read_new();
    if (fetch_data->a == NULL) {
        g_set_error(&fetch_data->error, RESTRAINT_FETCH_LIBARCHIVE_ERROR, 0, "archive_read_new failed");
        g_idle_add (archive_finish_callback, fetch_data);
        return;
    }

    /* Writer that materializes entries on disk. */
    fetch_data->ext = archive_write_disk_new();
    if (fetch_data->ext == NULL) {
        g_set_error(&fetch_data->error, RESTRAINT_FETCH_LIBARCHIVE_ERROR, 0, "archive_write_disk_new failed");
        g_idle_add (archive_finish_callback, fetch_data);
        return;
    }

    /* Accept any compression filter and any archive format. */
    archive_read_support_filter_all(fetch_data->a);
    archive_read_support_format_all(fetch_data->a);

    /* Start the HTTP request (project helper); failure is propagated into
     * fetch_data->error and reported via the finish callback. */
    gboolean open_succeeded = myopen(fetch_data, &tmp_error);
    if (!open_succeeded) {
        g_propagate_error(&fetch_data->error, tmp_error);
        g_idle_add (archive_finish_callback, fetch_data);
        return;
    }

    /* Wire libarchive's pull-reader to the HTTP stream callbacks. */
    r = archive_read_open(fetch_data->a, fetch_data, NULL, myread, myclose);
    if (r != ARCHIVE_OK) {
        g_set_error(&fetch_data->error, RESTRAINT_FETCH_LIBARCHIVE_ERROR, r, "archive_read_open failed: %s", archive_error_string(fetch_data->a));
        g_idle_add (archive_finish_callback, fetch_data);
        return;
    }

    /* Kick off incremental extraction on the main loop. */
    g_idle_add (http_archive_read_callback, fetch_data);
}
/* Open an inner archive at the current position within the given outer archive. */ static struct archive *open_inner(struct archive *outer) { struct archive *inner; struct inner_data *data; int r; inner = archive_read_new(); if (!inner) { opkg_msg(ERROR, "Failed to create inner archive object.\n"); return NULL; } data = (struct inner_data *)xmalloc(sizeof(struct inner_data)); data->buffer = xmalloc(EXTRACT_BUFFER_LEN); data->outer = outer; /* Inner package is in 'tar' format, gzip compressed. */ r = archive_read_support_filter_gzip(inner); if (r == ARCHIVE_WARN) { /* libarchive returns ARCHIVE_WARN if the filter is provided by * an external program. */ opkg_msg(INFO, "Gzip support provided by external program.\n"); } else if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Gzip format not supported.\n"); goto err_cleanup; } r = archive_read_support_format_tar(inner); if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Tar format not supported: %s\n", archive_error_string(outer)); goto err_cleanup; } r = archive_read_open(inner, data, NULL, inner_read, inner_close); if (r != ARCHIVE_OK) { opkg_msg(ERROR, "Failed to open inner archive: %s\n", archive_error_string(inner)); goto err_cleanup; } return inner; err_cleanup: archive_read_free(inner); free(data->buffer); free(data); return NULL; }
size_t LIBARCHIVEgetEntry(char *name, char *contentFile, char **ptr) { struct mydata *mydata; struct archive *a; struct archive_entry *entry; char *buf; size_t size = 0; mydata = (struct mydata*)malloc(sizeof(struct mydata)); a = archive_read_new(); mydata->name = name; archive_read_support_format_all(a); archive_read_support_compression_all(a); if (archive_read_open(a, mydata, myopen, myread, myclose) == ARCHIVE_FATAL) { fprintf(stderr, "failed to open %s\n", mydata->name); free(mydata->name); free(mydata); return 0; } while (archive_read_next_header(a, &entry) == ARCHIVE_OK) { if( 0 == strcmp(archive_entry_pathname(entry), contentFile)) { o_log(DEBUGM, "%s", (char *)archive_compression_name(a)); o_log(DEBUGM, "%s", (char *)archive_format_name(a)); o_log(DEBUGM, "%s", (char *)archive_entry_pathname(entry)); size = archive_entry_size(entry); if(size <= 0) o_log(DEBUGM, "zero size"); if ((buf = (char *)malloc(size+1)) == NULL) o_log(ERROR, "cannot allocate memory"); if ((size_t)archive_read_data(a, buf, size) != size) o_log(DEBUGM, "cannot read data"); buf[size] = '\0'; *ptr = buf; } else archive_read_data_skip(a); } archive_read_close(a); archive_read_finish(a); free(mydata); return size; }
/*
 * Verify the signature of a pkgsrc binary package.
 *
 * Reads the hash file (HASH_FNAME) and a signature member (SIGNATURE_FNAME,
 * falling back to GPG_SIGNATURE_FNAME) from *archive, verifies the hash file
 * against the signature (PKCS#7 when built with SSL, otherwise GPG), then
 * replaces *archive with a new reader that streams the signed inner package
 * through the hash-checking callbacks. *pkgname is set from the hash file.
 *
 * Returns 0 when a valid signature was found, -1 otherwise. On the -1 paths
 * *archive may have been closed and set to NULL.
 *
 * NOTE(review): when parse_hash_file() fails, 'state' and 'hash_file' are
 * not freed before the goto — looks like a leak; confirm parse_hash_file's
 * ownership of its arguments. 'hash_file' also appears to stay live for the
 * duration of verification (referenced by state after parse) — verify before
 * adding frees on the success path.
 */
int pkg_verify_signature(const char *archive_name, struct archive **archive, struct archive_entry **entry, char **pkgname)
{
	struct signature_archive *state;
	struct archive_entry *my_entry;
	struct archive *a;
	char *hash_file, *signature_file;
	size_t hash_len, signature_len;
	int r, has_sig;

	*pkgname = NULL;
	state = xmalloc(sizeof(*state));
	state->sign_blocks = NULL;
	state->sign_buf = NULL;
	state->archive = NULL;

	/* Pull the hash manifest out of the package archive. */
	r = read_file_from_archive(archive_name, *archive, entry, HASH_FNAME, &hash_file, &hash_len);
	if (r == -1) {
		/* hard read error: the archive is unusable from here on */
		archive_read_finish(*archive);
		*archive = NULL;
		free(state);
		goto no_valid_signature;
	} else if (r == 1) {
		/* no hash file present: unsigned package */
		free(state);
		goto no_valid_signature;
	}

	if (parse_hash_file(hash_file, pkgname, state))
		goto no_valid_signature;

	/* Prefer the PKCS#7 signature member... */
	r = read_file_from_archive(archive_name, *archive, entry, SIGNATURE_FNAME, &signature_file, &signature_len);
	if (r == -1) {
		archive_read_finish(*archive);
		*archive = NULL;
		free(state);
		free(hash_file);
		goto no_valid_signature;
	} else if (r != 0) {
		/* ...falling back to a detached GPG signature. */
		if (*entry != NULL)
			r = read_file_from_archive(archive_name, *archive, entry, GPG_SIGNATURE_FNAME, &signature_file, &signature_len);
		if (r == -1) {
			archive_read_finish(*archive);
			*archive = NULL;
			free(state);
			free(hash_file);
			goto no_valid_signature;
		} else if (r != 0) {
			free(hash_file);
			free(state);
			goto no_valid_signature;
		}
		has_sig = !detached_gpg_verify(hash_file, hash_len, signature_file, signature_len, gpg_keyring_verify);
		free(signature_file);
	} else {
#ifdef HAVE_SSL
		has_sig = !easy_pkcs7_verify(hash_file, hash_len, signature_file, signature_len, certs_packages, 1);
		free(signature_file);
#else
		warnx("No OpenSSL support compiled in, skipping signature");
		has_sig = 0;
		free(signature_file);
#endif
	}

	/* The next member must be the signed inner package itself. */
	r = archive_read_next_header(*archive, &my_entry);
	if (r != ARCHIVE_OK) {
		warnx("Cannot read inner package: %s", archive_error_string(*archive));
		free_signature_int(state);
		goto no_valid_signature;
	}

	/* Its size must match what the hash manifest declared. */
	if (archive_entry_size(my_entry) != state->pkg_size) {
		warnx("Package size doesn't match signature");
		free_signature_int(state);
		goto no_valid_signature;
	}

	state->archive = *archive;

	/* Hand the caller a new reader that streams the inner package through
	 * the block-hash verification callbacks. */
	a = archive_read_new();
	archive_read_support_compression_all(a);
	archive_read_support_format_all(a);
	if (archive_read_open(a, state, NULL, verify_signature_read_cb, verify_signature_close_cb)) {
		warnx("Can't open signed package file");
		archive_read_finish(a);
		goto no_valid_signature;
	}
	*archive = a;
	*entry = NULL;

	return has_sig ? 0 : -1;

no_valid_signature:
	return -1;
}
/*
 * explode source RPM into the current directory
 * use filters to skip packages and files we do not need
 *
 * @source   path to the RPM, or "-" for stdin
 * @filter   optional per-file predicate; non-zero return skips the file
 * @provides optional predicate over Provides: names; if none is accepted,
 *           EXIT_BADDEPS is returned
 * @deps     optional predicate over Requires: names; non-zero return aborts
 *           with EXIT_BADDEPS
 * @userptr  opaque pointer passed to the callbacks
 *
 * Returns EXIT_SUCCESS, EXIT_FAILURE, EXIT_BADDEPS, or -1 on libarchive
 * setup failure (pre-existing API; unchanged).
 *
 * Fixes vs. previous revision:
 *  - symlinks were created from the wrong buffer ('buffer' instead of the
 *    just-read 'symlinkbuffer') and the archive_read_data() result was
 *    compared to ARCHIVE_OK although it returns a byte count, so non-empty
 *    symlinks were never created;
 *  - the link target is now NUL-terminated and length-checked;
 *  - fdout was leaked when archive_read_data_into_fd() failed;
 *  - the final status now treats normal EOF as success (previously every
 *    successful run returned failure because rc ended as ARCHIVE_EOF);
 *  - the provides-rejection path now closes fdi like the deps path does.
 */
int explodeRPM(const char *source, filterfunc filter, dependencyfunc provides, dependencyfunc deps, void* userptr)
{
    char buffer[BUFFERSIZE+1]; /* make space for trailing \0 */
    FD_t fdi;
    Header h;
    char * rpmio_flags = NULL;
    rpmRC rc;
    FD_t gzdi;
    struct archive *cpio;
    struct archive_entry *cpio_entry;
    struct cpio_mydata cpio_mydata;
    rpmts ts;
    rpmVSFlags vsflags;
    const char *compr;
    int arc;

    if (strcmp(source, "-") == 0)
        fdi = fdDup(STDIN_FILENO);
    else
        fdi = Fopen(source, "r.ufdio");

    if (Ferror(fdi)) {
        const char *srcname = (strcmp(source, "-") == 0) ? "<stdin>" : source;
        logMessage(ERROR, "%s: %s\n", srcname, Fstrerror(fdi));
        return EXIT_FAILURE;
    }

    rpmReadConfigFiles(NULL, NULL);

    /* Initialize RPM transaction; do not check digests, signatures or headers */
    ts = rpmtsCreate();
    vsflags = 0;
    vsflags |= _RPMVSF_NODIGESTS;
    vsflags |= _RPMVSF_NOSIGNATURES;
    vsflags |= RPMVSF_NOHDRCHK;
    (void) rpmtsSetVSFlags(ts, vsflags);

    rc = rpmReadPackageFile(ts, fdi, "rpm2dir", &h);
    ts = rpmtsFree(ts);
    switch (rc) {
    case RPMRC_OK:
    case RPMRC_NOKEY:
    case RPMRC_NOTTRUSTED:
        break;
    case RPMRC_NOTFOUND:
        logMessage(ERROR, "%s is not an RPM package", source);
        return EXIT_FAILURE;
    case RPMRC_FAIL:
    default:
        logMessage(ERROR, "error reading header from %s package\n", source);
        return EXIT_FAILURE;
    }

    /* Retrieve all dependencies and run them through deps function */
    while (deps) {
        struct rpmtd_s td;
        const char *depname;

        if (!headerGet(h, RPMTAG_REQUIRENAME, &td, HEADERGET_MINMEM))
            break;
        while ((depname = rpmtdNextString(&td))) {
            if (deps(depname, userptr)) {
                Fclose(fdi);
                return EXIT_BADDEPS;
            }
        }
        rpmtdFreeData(&td);
        break;
    }

    /* Retrieve all provides and run them through provides function */
    while (provides) {
        struct rpmtd_s td;
        const char *depname;
        int found = 0;

        if (!headerGet(h, RPMTAG_PROVIDES, &td, HEADERGET_MINMEM))
            break;
        while ((depname = rpmtdNextString(&td))) {
            if (!provides(depname, userptr)) {
                found++;
            }
        }
        rpmtdFreeData(&td);
        if (found<=0) {
            Fclose(fdi); /* was leaked on this path */
            return EXIT_BADDEPS;
        }
        break;
    }

    /* Retrieve type of payload compression. */
    compr = headerGetString(h, RPMTAG_PAYLOADCOMPRESSOR);
    if (compr && strcmp(compr, "gzip")) {
        checked_asprintf(&rpmio_flags, "r.%sdio", compr);
    } else {
        checked_asprintf(&rpmio_flags, "r.gzdio");
    }

    /* Open uncompressed cpio stream */
    gzdi = Fdopen(fdi, rpmio_flags);
    free(rpmio_flags);
    if (gzdi == NULL) {
        logMessage(ERROR, "cannot re-open payload: %s\n", Fstrerror(gzdi));
        return EXIT_FAILURE;
    }

    /* initialize cpio decompressor */
    cpio = archive_read_new();
    if (cpio==NULL) {
        Fclose(gzdi);
        return -1;
    }
    cpio_mydata.gzdi = gzdi;
    cpio_mydata.buffer = buffer;
    archive_read_support_compression_all(cpio);
    archive_read_support_format_all(cpio);
    arc = archive_read_open(cpio, &cpio_mydata, NULL, rpm_myread, rpm_myclose);
    if (arc != ARCHIVE_OK) {
        Fclose(gzdi);
        return -1;
    }

    /* read all files in cpio archive */
    while ((arc = archive_read_next_header(cpio, &cpio_entry)) == ARCHIVE_OK) {
        const struct stat *fstat;
        int64_t fsize;
        const char* filename;
        int needskip = 1; /* do we need to read the data to get to the next header? */
        int offset = 0;
        int towrite = 0;

        filename = archive_entry_pathname(cpio_entry);
        fstat = archive_entry_stat(cpio_entry);
        fsize = archive_entry_size(cpio_entry);

        /* Strip leading slashes */
        while (filename[offset] == '/')
            offset+=1;

        /* Strip leading ./ */
        while (filename[offset] == '.' && filename[offset+1] == '/')
            offset+=2;

        /* Other file type - we do not care except special cases */
        if (!S_ISREG(fstat->st_mode))
            towrite = 1;
        else
            towrite = 2;

        if (filter && filter(filename+offset, fstat, userptr)) {
            /* filter this file */
            towrite = 0;
        }

        /* Create directories */
        char* dirname = strdup(filename+offset);
        /* If the dup fails, let's hope the dirs already exist */
        if (dirname){
            char* dirptr = dirname;
            while (dirptr && *dirptr) {
                dirptr = strchr(dirptr, '/');
                if (dirptr) {
                    *dirptr = 0;
                    mkdir(dirname, 0700);
                    *dirptr = '/';
                    dirptr++;
                }
            }
            free(dirname);
        }

        /* Regular file */
        if (towrite>=2) {
            FILE *fdout = fopen(filename+offset, "w");
            if (fdout==NULL){
                arc = 33;
                break;
            }
            if (archive_read_data_into_fd(cpio, fileno(fdout)) != ARCHIVE_OK) {
                /* XXX We didn't get the file.. well.. */
            }
            fclose(fdout); /* always close: was leaked on read failure */
            needskip = 0;
        }

        /* symlink, we assume that the path contained in symlink
         * is shorter than BUFFERSIZE */
        while (towrite && S_ISLNK(fstat->st_mode)) {
            char symlinkbuffer[BUFFERSIZE];
            needskip = 0;

            /* archive_read_data returns the number of bytes read,
             * not ARCHIVE_OK */
            if (fsize < 0 || fsize >= (int64_t)sizeof(symlinkbuffer) ||
                archive_read_data(cpio, symlinkbuffer, fsize) != fsize) {
                /* XXX We didn't get the link target.. well.. */
                break;
            }
            symlinkbuffer[fsize] = '\0';

            if (symlink(symlinkbuffer, filename+offset)) {
                logMessage(ERROR, "Failed to create symlink %s -> %s", filename+offset, symlinkbuffer);
            }
            break;
        }

        if(needskip)
            archive_read_data_skip(cpio);
    }
    archive_read_finish(cpio);

    /* ARCHIVE_EOF is the normal loop exit; anything else is a failure. */
    return arc != ARCHIVE_EOF;
}
void pixz_read(bool verify, size_t nspecs, char **specs) { decode_index(); if (verify) gFileIndexOffset = read_file_index(0); wanted_files(nspecs, specs); set_block_sizes(); #if DEBUG for (wanted_t *w = gWantedFiles; w; w = w->next) debug("want: %s", w->name); #endif pipeline_create(block_create, block_free, read_thread, decode_thread); if (verify && gFileIndexOffset) { gArWanted = gWantedFiles; wanted_t *w = gWantedFiles, *wlast = NULL; bool lastmulti = false; off_t lastoff = 0; struct archive *ar = archive_read_new(); archive_read_support_compression_none(ar); archive_read_support_format_tar(ar); archive_read_open(ar, NULL, tar_ok, tar_read, tar_ok); struct archive_entry *entry; while (true) { int aerr = archive_read_next_header(ar, &entry); if (aerr == ARCHIVE_EOF) { break; } else if (aerr != ARCHIVE_OK && aerr != ARCHIVE_WARN) { fprintf(stderr, "%s\n", archive_error_string(ar)); die("Error reading archive entry"); } off_t off = archive_read_header_position(ar); const char *path = archive_entry_pathname(entry); if (!lastmulti) { if (wlast && wlast->size != off - lastoff) die("Index and archive show differing sizes for %s: %d vs %d", wlast->name, wlast->size, off - lastoff); lastoff = off; } lastmulti = is_multi_header(path); if (lastmulti) continue; if (!w) die("File %s missing in index", path); if (strcmp(path, w->name) != 0) die("Index and archive differ as to next file: %s vs %s", w->name, path); wlast = w; w = w->next; } if (w && w->name) die("File %s missing in archive", w->name); tar_write_last(); // write whatever's left } else { pipeline_item_t *pi; while ((pi = pipeline_merged())) { io_block_t *ib = (io_block_t*)(pi->data); fwrite(ib->output, ib->outsize, 1, gOutFile); queue_push(gPipelineStartQ, PIPELINE_ITEM, pi); } } pipeline_destroy(); wanted_free(gWantedFiles); }
/**
 * Extract the archive stored at the given @path. This function
 * returns -1 if an error occurred, otherwise 0.
 *
 * The reader accepts cpio and GNU-tar formats with optional gzip or lzop
 * compression; actual extraction is delegated to extract_archive(). The
 * archive handle is always closed and freed, and the talloc'd callback
 * data released, before returning.
 */
int extract_archive_from_file(const char *path)
{
	struct archive *archive = NULL;
	CallbackData *data = NULL;
	int status2;
	int status;

	archive = archive_read_new();
	if (archive == NULL) {
		note(NULL, ERROR, INTERNAL, "can't initialize archive structure");
		status = -1;
		goto end;
	}

	/* Accepted formats: cpio and GNU tar. */
	status = archive_read_support_format_cpio(archive);
	if (status != ARCHIVE_OK) {
		note(NULL, ERROR, INTERNAL, "can't set archive format: %s",
			archive_error_string(archive));
		status = -1;
		goto end;
	}

	status = archive_read_support_format_gnutar(archive);
	if (status != ARCHIVE_OK) {
		note(NULL, ERROR, INTERNAL, "can't set archive format: %s",
			archive_error_string(archive));
		status = -1;
		goto end;
	}

	/* Accepted compression filters: gzip and lzop. */
	status = archive_read_support_filter_gzip(archive);
	if (status != ARCHIVE_OK) {
		note(NULL, ERROR, INTERNAL, "can't add archive filter: %s",
			archive_error_string(archive));
		status = -1;
		goto end;
	}

	status = archive_read_support_filter_lzop(archive);
	if (status != ARCHIVE_OK) {
		note(NULL, ERROR, INTERNAL, "can't add archive filter: %s",
			archive_error_string(archive));
		status = -1;
		goto end;
	}

	/* Callback state (holds the path); freed via TALLOC_FREE at the end. */
	data = talloc_zero(NULL, CallbackData);
	if (data == NULL) {
		note(NULL, ERROR, INTERNAL, "can't allocate callback data");
		status = -1;
		goto end;
	}

	data->path = talloc_strdup(data, path);
	if (data->path == NULL) {
		note(NULL, ERROR, INTERNAL, "can't allocate callback data path");
		status = -1;
		goto end;
	}

	status = archive_read_open(archive, data, open_callback, read_callback, close_callback);
	if (status != ARCHIVE_OK) {
		/* Don't complain if no error message were registered,
		 * ie. when testing for a self-extracting archive. */
		if (archive_error_string(archive) != NULL)
			note(NULL, ERROR, INTERNAL, "can't read archive: %s",
				archive_error_string(archive));
		status = -1;
		goto end;
	}

	status = extract_archive(archive);

end:
	/* Close/free failures are logged but do not override status. */
	if (archive != NULL) {
		status2 = archive_read_close(archive);
		if (status2 != ARCHIVE_OK) {
			note(NULL, WARNING, INTERNAL, "can't close archive: %s",
				archive_error_string(archive));
		}

		status2 = archive_read_free(archive);
		if (status2 != ARCHIVE_OK) {
			note(NULL, WARNING, INTERNAL, "can't free archive: %s",
				archive_error_string(archive));
		}
	}

	TALLOC_FREE(data);

	return status;
}
//////////////////////////////////////////////////////////////////////
// Constructor: Lua binding that builds an archive reader userdata.
//
// Expects a table argument:
//   reader      (function, required) — pull callback invoked by libarchive
//   format      (string, optional)   — space of names below, default "all"
//   compression (string, optional)   — default "none"
//   options     (string, optional)   — passed to archive_read_set_options
//
// Returns the userdata (1 value) or raises via err(). Stack-shape comments
// ({ud}, {fenv}, ...) track the Lua stack after each operation — keep them
// in sync when editing.
static int ar_read(lua_State *L) {
    struct archive** self_ref;

    static named_setter format_names[] = {
        /* Copied from archive.h */
        { "all",     archive_read_support_format_all },
        { "ar",      archive_read_support_format_ar },
        { "cpio",    archive_read_support_format_cpio },
        { "empty",   archive_read_support_format_empty },
        { "gnutar",  archive_read_support_format_gnutar },
        { "iso9660", archive_read_support_format_iso9660 },
        { "mtree",   archive_read_support_format_mtree },
        { "tar",     archive_read_support_format_tar },
        { "zip",     archive_read_support_format_zip },
        { NULL,      NULL }
    };
    static named_setter compression_names[] = {
        { "all",      archive_read_support_compression_all },
        { "bzip2",    archive_read_support_compression_bzip2 },
        { "compress", archive_read_support_compression_compress },
        { "gzip",     archive_read_support_compression_gzip },
        { "lzma",     archive_read_support_compression_lzma },
        { "none",     archive_read_support_compression_none },
        { "xz",       archive_read_support_compression_xz },
        { NULL,       NULL }
    };

    luaL_checktype(L, 1, LUA_TTABLE);

    // Userdata is created (and its metatable set) BEFORE archive_read_new so
    // the GC metamethod can clean up even on later errors.
    self_ref = (struct archive**)
        lua_newuserdata(L, sizeof(struct archive*)); // {ud}
    *self_ref = NULL;
    luaL_getmetatable(L, AR_READ);                   // {ud}, [read]
    lua_setmetatable(L, -2);                         // {ud}

    __ref_count++;
    *self_ref = archive_read_new();

    // Register it in the weak metatable:
    ar_registry_set(L, *self_ref);

    // Create an environment to store a reference to the callbacks:
    lua_createtable(L, 1, 0);                        // {ud}, {fenv}
    lua_getfield(L, 1, "reader");                    // {ud}, {fenv}, fn
    if ( ! lua_isfunction(L, -1) )
        err("MissingArgument: required parameter 'reader' must be a function");
    lua_setfield(L, -2, "reader");                   // {ud}, {fenv}
    lua_setfenv(L, -2);                              // {ud}

    // Do it the easy way for now... perhaps in the future we will have a
    // parameter to support toggling which algorithms are supported:
    if ( ARCHIVE_OK != archive_read_support_compression_all(*self_ref) ) {
        err("archive_read_support_compression_all: %s", archive_error_string(*self_ref));
    }
    if ( ARCHIVE_OK != archive_read_support_format_all(*self_ref) ) {
        err("archive_read_support_format_all: %s", archive_error_string(*self_ref));
    }

    // Extract various fields and prepare the archive:
    lua_getfield(L, 1, "format");
    if ( NULL == lua_tostring(L, -1) ) {
        lua_pop(L, 1);
        lua_pushliteral(L, "all");
    }
    if ( 0 == call_setters(L,
                           *self_ref,
                           "archive_read_support_format_",
                           format_names,
                           lua_tostring(L, -1)) )
    {
        // We will be strict for now... perhaps in the future we will
        // default to "all"?
        err("empty format='%s' is not allowed, you must specify at least one format",
            lua_tostring(L, -1));
    }
    lua_pop(L, 1);

    lua_getfield(L, 1, "compression");
    if ( NULL == lua_tostring(L, -1) ) {
        lua_pop(L, 1);
        lua_pushliteral(L, "none");
    }
    call_setters(L,
                 *self_ref,
                 "archive_read_support_compression_",
                 compression_names,
                 lua_tostring(L, -1));
    lua_pop(L, 1);

    lua_getfield(L, 1, "options");
    if ( ! lua_isnil(L, -1) &&
         ARCHIVE_OK != archive_read_set_options(*self_ref, lua_tostring(L, -1)) )
    {
        err("archive_read_set_options: %s", archive_error_string(*self_ref));
    }
    lua_pop(L, 1);

    // The lua_State itself is the callback cookie; ar_read_cb pulls data by
    // calling the stored 'reader' function.
    if ( ARCHIVE_OK != archive_read_open(*self_ref, L, NULL, &ar_read_cb, NULL) ) {
        err("archive_read_open: %s", archive_error_string(*self_ref));
    }

    return 1;
}
/*
 * Decompress a pixz/xz stream, optionally verifying the embedded tar file
 * index against the tar headers actually seen in the decoded output.
 *
 * @verify         when true and a file index exists, cross-check entry names
 *                 and sizes between index and archive
 * @nspecs/@specs  file name patterns selecting a subset to extract; a
 *                 non-zero nspecs disables the file-index stripping heuristic
 */
void pixz_read(bool verify, size_t nspecs, char **specs) {
    if (decode_index()) {
        if (verify)
            gFileIndexOffset = read_file_index();
        wanted_files(nspecs, specs);
        gExplicitFiles = nspecs;
    }

#if DEBUG
    for (wanted_t *w = gWantedFiles; w; w = w->next)
        debug("want: %s", w->name);
#endif

    /* Without a block index we must fall back to streaming reads. */
    pipeline_create(block_create, block_free,
        gIndex ? read_thread : read_thread_noindex, decode_thread);
    if (verify && gFileIndexOffset) {
        gArWanted = gWantedFiles;
        wanted_t *w = gWantedFiles, *wlast = NULL;
        bool lastmulti = false;
        off_t lastoff = 0;

        /* Uncompressed tar reader over the decoded stream. */
        struct archive *ar = archive_read_new();
        archive_read_support_compression_none(ar);
        archive_read_support_format_tar(ar);
        archive_read_open(ar, NULL, tar_ok, tar_read, tar_ok);
        struct archive_entry *entry;

        while (true) {
            int aerr = archive_read_next_header(ar, &entry);
            if (aerr == ARCHIVE_EOF) {
                break;
            } else if (aerr != ARCHIVE_OK && aerr != ARCHIVE_WARN) {
                fprintf(stderr, "%s\n", archive_error_string(ar));
                die("Error reading archive entry");
            }

            off_t off = archive_read_header_position(ar);
            const char *path = archive_entry_pathname(entry);

            /* Compare the previous entry's extent to the index unless the
             * previous header was a multi-part (pax/longname) header. */
            if (!lastmulti) {
                if (wlast && wlast->size != off - lastoff)
                    die("Index and archive show differing sizes for %s: %d vs %d",
                        wlast->name, wlast->size, off - lastoff);
                lastoff = off;
            }

            lastmulti = is_multi_header(path);
            if (lastmulti)
                continue;

            if (!w)
                die("File %s missing in index", path);
            if (strcmp(path, w->name) != 0)
                die("Index and archive differ as to next file: %s vs %s",
                    w->name, path);

            wlast = w;
            w = w->next;
        }
        archive_read_finish(ar);

        if (w && w->name)
            die("File %s missing in archive", w->name);

        tar_write_last(); // write whatever's left
    }

    if (!gExplicitFiles) {
        /* Heuristics for detecting pixz file index:
         * - Input must be streaming (otherwise read_thread does this)
         * - Data must look tar-like
         * - Must have all sized blocks, followed by unsized file index */
        bool start = !gIndex && verify, tar = false,
             all_sized = true, skipping = false;

        pipeline_item_t *pi;
        while ((pi = pipeline_merged())) {
            io_block_t *ib = (io_block_t*)(pi->data);

            /* The heuristic only holds while the trailing blocks are all
             * continuations of the suspected file index. */
            if (skipping && ib->btype != BLOCK_CONTINUATION) {
                fprintf(stderr,
                    "Warning: File index heuristic failed, use -t flag.\n");
                skipping = false;
            }
            if (!skipping && tar && !start && all_sized
                    && ib->btype == BLOCK_UNSIZED && taste_file_index(ib))
                skipping = true;

            /* First block decides whether the stream looks like tar. */
            if (start) {
                tar = taste_tar(ib);
                start = false;
            }
            if (ib->btype == BLOCK_UNSIZED)
                all_sized = false;

            if (!skipping)
                fwrite(ib->output, ib->outsize, 1, gOutFile);
            queue_push(gPipelineStartQ, PIPELINE_ITEM, pi);
        }
    }
    pipeline_destroy();

    wanted_free(gWantedFiles);
}
/* ArchiveReader::__construct {{{ * */ ZEND_METHOD(ArchiveReader, __construct) { archive_file_t *arch = NULL; int resource_id; zval *this = getThis(); const char *error_string = NULL; char *filename; long error_num, filename_len, result, format = 0, compression = 0, block_size = 0; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, ce_ArchiveException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|lll", &filename, &filename_len, &format, &compression, &block_size) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } #if PHP_API_VERSION < 20100412 if (PG(safe_mode) && (!php_checkuid(filename, NULL, CHECKUID_CHECK_FILE_AND_DIR))) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } #endif if (php_check_open_basedir(filename TSRMLS_CC)) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if(block_size <= 0){ block_size = PHP_ARCHIVE_BUF_LEN; } arch = (archive_file_t *) emalloc(sizeof(archive_file_t)); arch->stream = NULL; arch->current_entry = NULL; arch->entries = NULL; arch->struct_state = ARCHIVE_OK; arch->block_size = block_size; arch->mode = PHP_ARCHIVE_READ_MODE; arch->buf = emalloc(arch->block_size + 1); arch->filename = estrndup(filename, filename_len); arch->arch = archive_read_new(); archive_read_support_filter_all(arch->arch); switch(format){ case PHP_ARCHIVE_FORMAT_TAR: archive_read_support_format_tar(arch->arch); break; case PHP_ARCHIVE_FORMAT_CPIO: archive_read_support_format_cpio(arch->arch); break; default: archive_read_support_format_all(arch->arch); break; } switch(compression){ case PHP_ARCHIVE_COMPRESSION_NONE: break; case PHP_ARCHIVE_COMPRESSION_GZIP: if(archive_read_support_filter_gzip(arch->arch) != ARCHIVE_OK){ efree(arch->filename); efree(arch->buf); efree(arch); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Gzip compression support is not available in this build "); zend_restore_error_handling(&error_handling TSRMLS_CC); 
return; } break; case PHP_ARCHIVE_COMPRESSION_BZIP2: if(archive_read_support_filter_gzip(arch->arch) != ARCHIVE_OK){ efree(arch->filename); efree(arch->buf); efree(arch); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Bzip2 compression support is not available in this build "); zend_restore_error_handling(&error_handling TSRMLS_CC); return; } default: archive_read_support_filter_all(arch->arch); break; } result = archive_read_open(arch->arch, arch, _archive_open_clbk, _archive_read_clbk, _archive_close_clbk); if (result) { error_num = archive_errno(arch->arch); error_string = archive_error_string(arch->arch); if (arch->stream) { php_stream_close(arch->stream); } if (error_num && error_string) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to open file %s for reading: error #%d, %s", filename, error_num, error_string); } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to open file %s for reading: unknown error %d", filename, result); } zend_restore_error_handling(&error_handling TSRMLS_CC); archive_read_close(arch->arch); archive_read_free(arch->arch); efree(arch->filename); efree(arch->buf); efree(arch); return; } resource_id = zend_list_insert(arch,le_archive); add_property_resource(this, "fd", resource_id); zend_restore_error_handling(&error_handling TSRMLS_CC); return; }
/*
 * Read the file list out of a data.tar member of a .deb.
 *
 * @list/@size  receive the compressed file list on success
 * @debfile     name of the surrounding .deb (for error messages only)
 * @ar          the ar(1)-level reader supplying the member's bytes
 * @tar         a fresh libarchive handle to parse the tar stream with
 *
 * Non-directory entry paths (with leading "./" or "/" stripped) are fed to
 * the filelistcompressor; directories and empty names are skipped. The
 * caller owns @tar and is expected to release it — nothing here frees it.
 *
 * Error convention used throughout: archive_errno() == -EINVAL is a special
 * code meaning "no errno available", so only the libarchive message is
 * printed in that case.
 */
static retvalue read_data_tar(/*@out@*/char **list, /*@out@*/size_t *size, const char *debfile, struct ar_archive *ar, struct archive *tar)
{
	struct archive_entry *entry;
	struct filelistcompressor c;
	retvalue r;
	int a, e;

	r = filelistcompressor_setup(&c);
	if (RET_WAS_ERROR(r))
		return r;

	archive_read_support_format_tar(tar);
	archive_read_support_format_gnutar(tar);
	a = archive_read_open(tar, ar,
			ar_archivemember_open,
			ar_archivemember_read,
			ar_archivemember_close);
	if (a != ARCHIVE_OK) {
		filelistcompressor_cancel(&c);
		e = archive_errno(tar);
		if (e == -EINVAL) /* special code to say there is none */
			fprintf(stderr,
"open data.tar within '%s' failed: %s\n",
				debfile, archive_error_string(tar));
		else
			fprintf(stderr,
"open data.tar within '%s' failed: %d:%d:%s\n",
				debfile, a, e, archive_error_string(tar));
		return RET_ERROR;
	}

	while ((a = archive_read_next_header(tar, &entry)) == ARCHIVE_OK) {
		const char *name = archive_entry_pathname(entry);
		mode_t mode;

		/* normalize "./foo" and "/foo" to "foo" */
		if (name[0] == '.')
			name++;
		if (name[0] == '/')
			name++;
		if (name[0] == '\0')
			continue;

		mode = archive_entry_mode(entry);
		if (!S_ISDIR(mode)) {
			r = filelistcompressor_add(&c, name, strlen(name));
			if (RET_WAS_ERROR(r)) {
				filelistcompressor_cancel(&c);
				return r;
			}
		}
		if (interrupted()) {
			filelistcompressor_cancel(&c);
			return RET_ERROR_INTERRUPTED;
		}

		/* only names are needed; skip the file body */
		a = archive_read_data_skip(tar);
		if (a != ARCHIVE_OK) {
			e = archive_errno(tar);
			if (e == -EINVAL) {
				r = RET_ERROR;
				fprintf(stderr,
"Error skipping %s within data.tar from %s: %s\n",
					archive_entry_pathname(entry),
					debfile, archive_error_string(tar));
			} else {
				fprintf(stderr,
"Error %d skipping %s within data.tar from %s: %s\n",
					e, archive_entry_pathname(entry),
					debfile, archive_error_string(tar));
				if (e != 0)
					r = RET_ERRNO(e);
				else
					r = RET_ERROR;
			}
			filelistcompressor_cancel(&c);
			return r;
		}
	}

	/* the loop must end in EOF; anything else is a read error */
	if (a != ARCHIVE_EOF) {
		e = archive_errno(tar);
		if (e == -EINVAL) {
			r = RET_ERROR;
			fprintf(stderr,
"Error reading data.tar from %s: %s\n",
				debfile, archive_error_string(tar));
		} else {
			fprintf(stderr,
"Error %d reading data.tar from %s: %s\n",
				e, debfile, archive_error_string(tar));
			if (e != 0)
				r = RET_ERRNO(e);
			else
				r = RET_ERROR;
		}
		filelistcompressor_cancel(&c);
		return r;
	}
	return filelistcompressor_finish(&c, list, size);
}