// Final packing stage: append the PackHeader, then the overlay_offset
// as a target-endian 32-bit value so the stub can locate the overlay.
// The Filter argument is unused in this base implementation.
void PackUnix::pack4(OutputFile *fo, Filter &)
{
    writePackHeader(fo);
    unsigned oo_te;
    set_te32(&oo_te, overlay_offset);
    fo->write(&oo_te, sizeof(oo_te));
}
// Top-level packing driver: choose a block size, allocate the work
// buffers, then emit the packed image in four stages (pack1..pack4)
// and finish with the compression-ratio check.
void PackUnix::pack(OutputFile *fo)
{
    Filter ft(ph.level);
    ft.addvalue = 0;
    b_len = 0;
    progid = 0;

    // set options: block size from the command line, clamped to
    // a positive value and to the input file size
    blocksize = opt->o_unix.blocksize;
    if (blocksize <= 0)
        blocksize = BLOCKSIZE;
    if ((off_t)blocksize > file_size)
        blocksize = file_size;

    // init compression buffers sized for one block
    ibuf.alloc(blocksize);
    obuf.allocForCompression(blocksize);

    fi->seek(0, SEEK_SET);
    pack1(fo, ft); // generate Elf header, etc.

    // Emit the p_info record describing the whole input.
    p_info pinfo;
    set_te32(&pinfo.p_progid, progid);
    set_te32(&pinfo.p_filesize, file_size);
    set_te32(&pinfo.p_blocksize, blocksize);
    fo->write(&pinfo, sizeof(pinfo));

    // Append the compressed body; on success terminate the block
    // list with an end marker (uncompressed size 0).
    if (pack2(fo, ft)) {
        b_info end_marker;
        memset(&end_marker, 0, sizeof(end_marker));
        set_le32(&end_marker.sz_cpr, UPX_MAGIC_LE32);
        fo->write(&end_marker, sizeof(end_marker));
    }

    pack3(fo, ft); // append loader
    pack4(fo, ft); // append PackHeader and overlay_offset; update Elf header

    // finally check the compression ratio
    if (!checkFinalCompressionRatio(fo))
        throwNotCompressible();
}
void PackUnix::patchLoaderChecksum() { unsigned char *const ptr = getLoader(); l_info *const lp = &linfo; // checksum for loader; also some PackHeader info lp->l_magic = UPX_MAGIC_LE32; // LE32 always set_te16(&lp->l_lsize, (upx_uint16_t) lsize); lp->l_version = (unsigned char) ph.version; lp->l_format = (unsigned char) ph.format; // INFO: lp->l_checksum is currently unused set_te32(&lp->l_checksum, upx_adler32(ptr, lsize)); }
// Compress one extent [x.offset, x.offset + x.size) of the input file
// block-by-block and append b_info headers plus block data to 'fo'.
// total_in/total_out accumulate uncompressed/compressed byte counts.
// If hdr_u_len is nonzero, the first hdr_u_len bytes of the file are
// compressed once (as an extra leading block) before the extent data.
// NOTE(review): the adler bookkeeping below is order-sensitive —
// ph.u_adler must reflect the PRE-filter data so the stub's
// end-to-end checksum matches after decompress + unfilter.
void PackUnix::packExtent(
    const Extent &x,
    unsigned &total_in, unsigned &total_out,
    Filter *ft, OutputFile *fo, unsigned hdr_u_len
)
{
    // Snapshot the running checksums so the optional header block
    // can be folded in starting from the pre-extent state.
    unsigned const init_u_adler = ph.u_adler;
    unsigned const init_c_adler = ph.c_adler;
    MemBuffer hdr_ibuf;
    if (hdr_u_len) {
        // Read the leading header bytes from the start of the file.
        hdr_ibuf.alloc(hdr_u_len);
        fi->seek(0, SEEK_SET);
        int l = fi->readx(hdr_ibuf, hdr_u_len);
        (void)l;
    }
    fi->seek(x.offset, SEEK_SET);
    for (off_t rest = x.size; 0 != rest; ) {
        int const filter_strategy = ft ? getStrategy(*ft) : 0;
        // Read one block (at most 'blocksize' bytes).
        int l = fi->readx(ibuf, UPX_MIN(rest, (off_t)blocksize));
        if (l == 0) {
            break; // EOF before the extent was exhausted
        }
        rest -= l;
        // Note: compression for a block can fail if the
        // file is e.g. blocksize + 1 bytes long

        // compress
        ph.c_len = ph.u_len = l;
        ph.overlap_overhead = 0;
        unsigned end_u_adler = 0;
        if (ft) {
            // compressWithFilters() updates u_adler _inside_ compress();
            // that is, AFTER filtering. We want BEFORE filtering,
            // so that decompression checks the end-to-end checksum.
            end_u_adler = upx_adler32(ibuf, ph.u_len, ph.u_adler);
            ft->buf_len = l;
            // compressWithFilters() requirements?
            ph.filter = 0;
            ph.filter_cto = 0;
            ft->id = 0;
            ft->cto = 0;
            compressWithFilters(ft, OVERHEAD, NULL_cconf, filter_strategy,
                0, 0, 0, hdr_ibuf, hdr_u_len);
        }
        else {
            (void) compress(ibuf, ph.u_len, obuf); // ignore return value
        }

        if (ph.c_len < ph.u_len) {
            // Block compressed: verify it can be decompressed in place.
            const upx_bytep tbuf = NULL;
            if (ft == NULL || ft->id == 0)
                tbuf = ibuf; // unfiltered data can be compared directly
            ph.overlap_overhead = OVERHEAD;
            if (!testOverlappingDecompression(obuf, tbuf, ph.overlap_overhead)) {
                // not in-place compressible
                ph.c_len = ph.u_len;
            }
        }
        if (ph.c_len >= ph.u_len) {
            // block is not compressible: store it verbatim
            ph.c_len = ph.u_len;
            memcpy(obuf, ibuf, ph.c_len);
            // must update checksum of compressed data
            ph.c_adler = upx_adler32(ibuf, ph.u_len, ph.saved_c_adler);
        }

        // write block sizes
        b_info tmp;
        if (hdr_u_len) {
            // One-time header block: compress it separately and emit it
            // (with its own b_info) ahead of the extent's first block.
            unsigned hdr_c_len = 0;
            MemBuffer hdr_obuf;
            hdr_obuf.allocForCompression(hdr_u_len);
            int r = upx_compress(hdr_ibuf, hdr_u_len, hdr_obuf, &hdr_c_len,
                0, ph.method, 10, NULL, NULL);
            if (r != UPX_E_OK)
                throwInternalError("header compression failed");
            if (hdr_c_len >= hdr_u_len)
                throwInternalError("header compression size increase");
            // Rebase the running checksums: header first, then the
            // current block, so the stub's cumulative adler matches.
            ph.saved_u_adler = upx_adler32(hdr_ibuf, hdr_u_len, init_u_adler);
            ph.saved_c_adler = upx_adler32(hdr_obuf, hdr_c_len, init_c_adler);
            ph.u_adler = upx_adler32(ibuf, ph.u_len, ph.saved_u_adler);
            ph.c_adler = upx_adler32(obuf, ph.c_len, ph.saved_c_adler);
            end_u_adler = ph.u_adler;
            memset(&tmp, 0, sizeof(tmp));
            set_te32(&tmp.sz_unc, hdr_u_len);
            set_te32(&tmp.sz_cpr, hdr_c_len);
            tmp.b_method = (unsigned char) ph.method;
            fo->write(&tmp, sizeof(tmp));
            b_len += sizeof(b_info);
            fo->write(hdr_obuf, hdr_c_len);
            total_out += hdr_c_len;
            total_in += hdr_u_len;
            hdr_u_len = 0; // compress hdr one time only
        }
        // b_info for this block; method/filter fields stay zero when
        // the block is stored uncompressed.
        memset(&tmp, 0, sizeof(tmp));
        set_te32(&tmp.sz_unc, ph.u_len);
        set_te32(&tmp.sz_cpr, ph.c_len);
        if (ph.c_len < ph.u_len) {
            tmp.b_method = (unsigned char) ph.method;
            if (ft) {
                tmp.b_ftid = (unsigned char) ft->id;
                tmp.b_cto8 = ft->cto;
            }
        }
        fo->write(&tmp, sizeof(tmp));
        b_len += sizeof(b_info);
        if (ft) {
            // Restore the pre-filter checksum computed above.
            ph.u_adler = end_u_adler;
        }

        // write compressed data
        if (ph.c_len < ph.u_len) {
            fo->write(obuf, ph.c_len);
            // Checks ph.u_adler after decompression, after unfiltering
            verifyOverlappingDecompression(ft);
        }
        else {
            fo->write(ibuf, ph.u_len);
        }
        total_in += ph.u_len;
        total_out += ph.c_len;
    }
}
// Stage 2 of packing: compress the whole input file block-by-block and
// append each block (b_info header + data) to 'fo'. On return ph.u_len
// and ph.c_len hold the uncompressed/compressed totals. Returns 1 to
// tell the caller to write the end-of-compression block marker next;
// throws if the input ended before file_size bytes were consumed.
int PackUnix::pack2(OutputFile *fo, Filter &ft)
{
    // compress blocks
    unsigned total_in = 0;
    unsigned total_out = 0;

    // FIXME: ui_total_passes is not correct with multiple blocks...
    // ui_total_passes = (file_size + blocksize - 1) / blocksize;
    // if (ui_total_passes == 1)
    //     ui_total_passes = 0;

    unsigned remaining = file_size;
    unsigned n_block = 0;
    while (remaining > 0) {
        // FIXME: disable filters if we have more than one block.
        // FIXME: There is only 1 un-filter in the stub [as of 2002-11-10].
        // So the next block really has no choice!
        // This merely prevents an assert() in compressWithFilters(),
        // which assumes it has free choice on each call [block].
        // And if the choices aren't the same on each block,
        // then un-filtering will give incorrect results.
        int filter_strategy = getStrategy(ft);
        if (file_size > (off_t)blocksize)
            filter_strategy = -3; // no filters

        // Read one block of input.
        int l = fi->readx(ibuf, UPX_MIN(blocksize, remaining));
        remaining -= l;
        // Note: compression for a block can fail if the
        // file is e.g. blocksize + 1 bytes long

        // compress
        ph.overlap_overhead = 0;
        ph.c_len = ph.u_len = l;
        ft.buf_len = l;

        // compressWithFilters() updates u_adler _inside_ compress();
        // that is, AFTER filtering. We want BEFORE filtering,
        // so that decompression checks the end-to-end checksum.
        unsigned const end_u_adler = upx_adler32(ibuf, ph.u_len, ph.u_adler);
        compressWithFilters(&ft, OVERHEAD, NULL_cconf, filter_strategy,
            !!n_block++); // check compression ratio only on first block

        if (ph.c_len < ph.u_len) {
            // Block compressed: verify it can be decompressed in place.
            const upx_bytep tbuf = NULL;
            if (ft.id == 0)
                tbuf = ibuf; // unfiltered data can be compared directly
            ph.overlap_overhead = OVERHEAD;
            if (!testOverlappingDecompression(obuf, tbuf, ph.overlap_overhead)) {
                // not in-place compressible
                ph.c_len = ph.u_len;
            }
        }
        if (ph.c_len >= ph.u_len) {
            // block is not compressible
            ph.c_len = ph.u_len;
            // must manually update checksum of compressed data
            ph.c_adler = upx_adler32(ibuf, ph.u_len, ph.saved_c_adler);
        }

        // write block header; method/filter fields stay zero when the
        // block is stored uncompressed
        b_info blk_info;
        memset(&blk_info, 0, sizeof(blk_info));
        set_te32(&blk_info.sz_unc, ph.u_len);
        set_te32(&blk_info.sz_cpr, ph.c_len);
        if (ph.c_len < ph.u_len) {
            blk_info.b_method = (unsigned char) ph.method;
            blk_info.b_ftid = (unsigned char) ph.filter;
            blk_info.b_cto8 = (unsigned char) ph.filter_cto;
        }
        fo->write(&blk_info, sizeof(blk_info));
        b_len += sizeof(b_info);

        // write compressed data
        if (ph.c_len < ph.u_len) {
            fo->write(obuf, ph.c_len);
            verifyOverlappingDecompression(); // uses ph.u_adler
        }
        else {
            fo->write(ibuf, ph.u_len);
        }

        // Carry the pre-filter checksum forward to the next block.
        ph.u_adler = end_u_adler;
        total_in += ph.u_len;
        total_out += ph.c_len;
    }

    // update header with totals
    ph.u_len = total_in;
    ph.c_len = total_out;
    if ((off_t)total_in != file_size) {
        throwEOFException();
    }
    return 1; // default: write end-of-compression bhdr next
}
int PackVmlinuzARMEL::decompressKernel() { // read whole kernel image obuf.alloc(file_size); fi->seek(0, SEEK_SET); fi->readx(obuf, file_size); //checkAlreadyPacked(obuf + setup_size, UPX_MIN(file_size - setup_size, (off_t)1024)); // Find head.S: // bl decompress_kernel # 0xeb...... // b call_kernel # 0xea...... //LC0: .word LC0 # self! unsigned decompress_kernel = 0; unsigned caller1 = 0; unsigned caller2 = 0; unsigned got_start = 0; unsigned got_end = 0; for (unsigned j = 0; j < 0x400; j+=4) { unsigned w; if (j!=get_te32(j + obuf)) { continue; } if (0xea000000!=(0xff000000& get_te32(j - 4 + obuf)) || 0xeb000000!=(0xff000000&(w= get_te32(j - 8 + obuf))) ) { continue; } caller1 = j - 8; decompress_kernel = ((0x00ffffff & w)<<2) + 8+ caller1; for (unsigned k = 12; k<=128; k+=4) { w = get_te32(j - k + obuf); if (0xeb000000==(0xff000000 & w) && decompress_kernel==(((0x00ffffff & w)<<2) + 8+ j - k) ) { caller2 = j - k; break; } } got_start = get_te32(5*4 + j + obuf); got_end = get_te32(6*4 + j + obuf); #if 0 /*{*/ printf("decompress_kernel=0x%x got_start=0x%x got_end=0x%x\n", decompress_kernel, got_start, got_end); #endif /*}*/ break; } if (0==decompress_kernel) { return 0; } // Find first subroutine that is called by decompress_kernel, // which we will consider to be the start of the gunzip module // and the end of the non-gunzip modules. for (unsigned j = decompress_kernel; j < (unsigned)file_size; j+=4) { unsigned w = get_te32(j + obuf); if (0xeb800000==(0xff800000 & w)) { setup_size = 8+ ((0xff000000 | w)<<2) + j; // Move the GlobalOffsetTable. for (unsigned k = got_start; k < got_end; k+=4) { w = get_te32(k + obuf); // FIXME: must relocate w set_te32(k - got_start + setup_size + obuf, w); } setup_size += got_end - got_start; set_te32(&obuf[caller1], 0xeb000000 | (0x00ffffff & ((setup_size - (8+ caller1))>>2)) ); set_te32(&obuf[caller2], 0xeb000000 | (0x00ffffff & ((setup_size - (8+ caller2))>>2)) ); break; } }