int PackUnix::canUnpack() { int const small = 32 + sizeof(overlay_offset); // Allow zero-filled last page, for Mac OS X code signing. int bufsize = 2*4096 + 2*small +1; if (bufsize > fi->st_size()) bufsize = fi->st_size(); MemBuffer buf(bufsize); fi->seek(-(off_t)bufsize, SEEK_END); fi->readx(buf, bufsize); int i = bufsize; while (i > small && 0 == buf[--i]) { } i -= small; // allow incompressible extents if (i < 0 || !getPackHeader(buf + i, bufsize - i, true)) return false; int l = ph.buf_offset + ph.getPackHeaderSize(); if (l < 0 || l + 4 > bufsize) throwCantUnpack("file corrupted"); overlay_offset = get_te32(buf + i + l); if ((off_t)overlay_offset >= file_size) throwCantUnpack("file corrupted"); return true; }
/**
 * Identify an ARM zImage by its leading padding: the file must begin with
 * eight 0xe1a00000 words ("mov r0,r0" no-ops).
 *
 * @return UPX_F_VMLINUZ_ARMEL on match, 0 otherwise.
 */
int PackVmlinuzARMEL::readFileHeader()
{
    unsigned int hdr[8];
    fi->readx(hdr, sizeof(hdr));
    int j = 0;
    while (j < 8) {
        if (get_te32(&hdr[j]) != 0xe1a00000)
            return 0;   // first 8 words are not all ARM no-ops
        ++j;
    }
    return UPX_F_VMLINUZ_ARMEL;
}
int PackUnix::canUnpack() { upx_byte buf[sizeof(overlay_offset) + 32]; const int bufsize = sizeof(buf); fi->seek(-bufsize, SEEK_END); fi->readx(buf, bufsize); if (!getPackHeader(buf, bufsize, true)) // allow incompressible extents return false; int l = ph.buf_offset + ph.getPackHeaderSize(); if (l < 0 || l + 4 > bufsize) throwCantUnpack("file corrupted"); overlay_offset = get_te32(buf+l); if ((off_t)overlay_offset >= file_size) throwCantUnpack("file corrupted"); return true; }
/**
 * Unpack a compressed ELF32/x86 PT_INTERP program back to its original
 * form, verifying per-stream adler32 checksums and total size.
 *
 * Layout handled: compressed Ehdr+Phdrs first, then one compressed extent
 * per PT_LOAD (executable segments may be filtered), then an optional
 * alignment-hole extent between the first two PT_LOADs, then any trailing
 * non-PT_LOAD data, then an end-of-stream marker (sz_unc==0, magic).
 *
 * @param fo  output file, or NULL for test-only decompression.
 * @throws CantUnpack / CompressedDataViolation / EOFException / ChecksumError
 */
void PackLinuxElf32x86interp::unpack(OutputFile *fo)
{
#define MAX_INTERP_HDR 512
    union {
        unsigned char buf[MAX_INTERP_HDR];
        //struct { Elf32_Ehdr ehdr; Elf32_Phdr phdr; } e;
    } u;
    Elf32_Ehdr *const ehdr = (Elf32_Ehdr *) u.buf;
    Elf32_Phdr const *phdr = (Elf32_Phdr *) (u.buf + sizeof(*ehdr));
    unsigned szb_info = sizeof(b_info);
    {
        // Peek at the packed file's entry point to detect the legacy
        // 8-byte b_info layout used by old stubs.
        fi->seek(0, SEEK_SET);
        fi->readx(u.buf, MAX_INTERP_HDR);
        unsigned const e_entry = get_te32(&ehdr->e_entry);
        if (e_entry < 0x401180) { /* old style, 8-byte b_info */
            szb_info = 2*sizeof(unsigned);
        }
    }

    // p_info at overlay_offset gives original size and block size.
    fi->seek(overlay_offset, SEEK_SET);
    p_info hbuf;
    fi->readx(&hbuf, sizeof(hbuf));
    unsigned orig_file_size = get_te32(&hbuf.p_filesize);
    blocksize = get_te32(&hbuf.p_blocksize);
    if (file_size > (off_t)orig_file_size || blocksize > orig_file_size)
        throwCantUnpack("file header corrupted");

    ibuf.alloc(blocksize + OVERHEAD);
    b_info bhdr; memset(&bhdr, 0, sizeof(bhdr));
    fi->readx(&bhdr, szb_info);
    ph.u_len = get_te32(&bhdr.sz_unc);
    ph.c_len = get_te32(&bhdr.sz_cpr);
    ph.filter_cto = bhdr.b_cto8;

    // Uncompress Ehdr and Phdrs.
    fi->readx(ibuf, ph.c_len);
    decompress(ibuf, (upx_byte *)ehdr, false);

    unsigned total_in = 0;
    unsigned total_out = 0;
    unsigned c_adler = upx_adler32(NULL, 0);
    unsigned u_adler = upx_adler32(NULL, 0);
    // ptload0hi: end offset of first PT_LOAD; ptload1lo/sz: start & size
    // of second PT_LOAD, used to detect an alignment hole between them.
    off_t ptload0hi=0, ptload1lo=0, ptload1sz=0;

    // decompress PT_LOAD
    bool first_PF_X = true;
    // Rewind so the Ehdr+Phdrs extent is re-read as the first extent.
    fi->seek(- (off_t) (szb_info + ph.c_len), SEEK_CUR);
    for (unsigned j=0; j < ehdr->e_phnum; ++phdr, ++j) {
        if (PT_LOAD==phdr->p_type) {
            if (0==ptload0hi) {
                ptload0hi = phdr->p_filesz + phdr->p_offset;
            }
            else if (0==ptload1lo) {
                ptload1lo = phdr->p_offset;
                ptload1sz = phdr->p_filesz;
            }
            if (fo)
                fo->seek(phdr->p_offset, SEEK_SET);
            if (Elf32_Phdr::PF_X & phdr->p_flags) {
                // Only the first executable segment may carry the
                // "first_PF_X" unfiltered-Ehdr convention.
                unpackExtent(phdr->p_filesz, fo, total_in, total_out,
                    c_adler, u_adler, first_PF_X, szb_info);
                first_PF_X = false;
            }
            else {
                unpackExtent(phdr->p_filesz, fo, total_in, total_out,
                    c_adler, u_adler, false, szb_info);
            }
        }
    }

    if (0!=ptload1sz && ptload0hi < ptload1lo) { // alignment hole?
        if (fo)
            fo->seek(ptload0hi, SEEK_SET);
        unpackExtent(ptload1lo - ptload0hi, fo, total_in, total_out,
            c_adler, u_adler, false, szb_info);
    }
    if (total_out != orig_file_size) {  // non-PT_LOAD stuff
        if (fo)
            fo->seek(0, SEEK_END);
        unpackExtent(orig_file_size - total_out, fo, total_in, total_out,
            c_adler, u_adler, false, szb_info);
    }

    // check for end-of-file
    fi->readx(&bhdr, szb_info);
    unsigned const sz_unc = ph.u_len = get_te32(&bhdr.sz_unc);

    if (sz_unc == 0) { // uncompressed size 0 -> EOF
        // note: magic is always stored le32
        unsigned const sz_cpr = get_le32(&bhdr.sz_cpr);
        if (sz_cpr != UPX_MAGIC_LE32)  // sz_cpr must be h->magic
            throwCompressedDataViolation();
    }
    else { // extra bytes after end?
        throwCompressedDataViolation();
    }

    // update header with totals
    ph.c_len = total_in;
    ph.u_len = total_out;

    // all bytes must be written
    if (total_out != orig_file_size)
        throwEOFException();

    // finally test the checksums
    if (ph.c_adler != c_adler || ph.u_adler != u_adler)
        throwChecksumError();
#undef MAX_INTERP_HDR
}
/**
 * Generic Unix unpacker: decompress the sequence of b_info-prefixed blocks
 * that follow overlay_offset, verifying adler32 checksums of both the
 * compressed and uncompressed streams.
 *
 * @param fo  output file, or NULL for test-only decompression.
 * @throws CantUnpack / CompressedDataViolation / EOFException / ChecksumError
 */
void PackUnix::unpack(OutputFile *fo)
{
    unsigned const szb_info = sizeof(b_info);

    unsigned c_adler = upx_adler32(NULL, 0);
    unsigned u_adler = upx_adler32(NULL, 0);

    // defaults for ph.version == 8 (old format without a p_info header)
    unsigned orig_file_size = 0;
    blocksize = 512 * 1024;

    fi->seek(overlay_offset, SEEK_SET);
    if (ph.version > 8)
    {
        // Modern format: p_info header carries original size and blocksize.
        p_info hbuf;
        fi->readx(&hbuf, sizeof(hbuf));
        orig_file_size = get_te32(&hbuf.p_filesize);
        blocksize = get_te32(&hbuf.p_blocksize);

        if (file_size > (off_t)orig_file_size || blocksize > orig_file_size)
            throwCantUnpack("file header corrupted");
    }
    else
    {
        // skip 4 bytes (program id)
        fi->seek(4, SEEK_CUR);
    }

    // Guard the ibuf.alloc() argument against signed overflow from a
    // hostile blocksize.
    if ((int)(blocksize + OVERHEAD) < 0)
        throwCantUnpack("blocksize corrupted");
    ibuf.alloc(blocksize + OVERHEAD);

    // decompress blocks
    unsigned total_in = 0;
    unsigned total_out = 0;
    b_info bhdr; memset(&bhdr, 0, sizeof(bhdr));
    for (;;)
    {
#define buf ibuf
        int i;
        unsigned sz_unc, sz_cpr;

        fi->readx(&bhdr, szb_info);
        ph.u_len = sz_unc = get_te32(&bhdr.sz_unc);
        ph.c_len = sz_cpr = get_te32(&bhdr.sz_cpr);

        if (sz_unc == 0)                   // uncompressed size 0 -> EOF
        {
            // note: must reload sz_cpr as magic is always stored le32
            sz_cpr = get_le32(&bhdr.sz_cpr);
            if (sz_cpr != UPX_MAGIC_LE32)  // sz_cpr must be h->magic
                throwCompressedDataViolation();
            break;
        }
        if (sz_unc <= 0 || sz_cpr <= 0)
            throwCompressedDataViolation();
        if (sz_cpr > sz_unc || sz_unc > blocksize)
            throwCompressedDataViolation();

        // Read compressed data into the tail of buf so in-place
        // decompression toward buf[0] cannot overtake the source.
        i = blocksize + OVERHEAD - sz_cpr;
        if (i < 0)
            throwCantUnpack("corrupt b_info");
        fi->readx(buf+i, sz_cpr);
        // update checksum of compressed data
        c_adler = upx_adler32(buf + i, sz_cpr, c_adler);
        // decompress
        if (sz_cpr < sz_unc) {
            decompress(buf+i, buf, false);
            if (0!=bhdr.b_ftid) {
                // Undo the per-block filter recorded in b_info.
                Filter ft(ph.level);
                ft.init(bhdr.b_ftid);
                ft.cto = bhdr.b_cto8;
                ft.unfilter(buf, sz_unc);
            }
            i = 0;   // block was stored compressed, so result is at buf[0];
                     // otherwise (sz_cpr == sz_unc) data stays at buf+i.
        }
        // update checksum of uncompressed data
        u_adler = upx_adler32(buf + i, sz_unc, u_adler);
        total_in += sz_cpr;
        total_out += sz_unc;
        // write block
        if (fo)
            fo->write(buf + i, sz_unc);
#undef buf
    }

    // update header with totals
    ph.c_len = total_in;
    ph.u_len = total_out;

    // all bytes must be written (size unknown for version <= 8)
    if (ph.version > 8 && total_out != orig_file_size)
        throwEOFException();

    // finally test the checksums
    if (ph.c_adler != c_adler || ph.u_adler != u_adler)
        throwChecksumError();
}
/**
 * Decompress exactly 'wanted' uncompressed bytes worth of b_info blocks
 * from the current position of fi, updating running totals and adler32
 * checksums, and writing the result to fo (if non-NULL).
 *
 * @param wanted      number of uncompressed bytes this extent must yield.
 * @param fo          output file, or NULL for test-only decompression.
 * @param total_in    running total of compressed bytes consumed (in/out).
 * @param total_out   running total of uncompressed bytes produced (in/out).
 * @param c_adler     running adler32 of compressed data (in/out).
 * @param u_adler     running adler32 of uncompressed data (in/out).
 * @param first_PF_X  true only for the first executable segment; its first
 *                    block (the Elf32_Ehdr) is never filtered in the
 *                    ancient 8-byte-b_info format.
 * @param szb_info    size of the on-disk b_info: 12 (modern, per-block
 *                    filter id) or 8 (ancient, per-file filter).
 * @throws CantUnpack on any malformed b_info.
 */
void PackUnix::unpackExtent(unsigned wanted, OutputFile *fo,
    unsigned &total_in, unsigned &total_out,
    unsigned &c_adler, unsigned &u_adler,
    bool first_PF_X, unsigned szb_info
)
{
    b_info hdr; memset(&hdr, 0, sizeof(hdr));
    while (wanted) {
        fi->readx(&hdr, szb_info);
        int const sz_unc = ph.u_len = get_te32(&hdr.sz_unc);
        int const sz_cpr = ph.c_len = get_te32(&hdr.sz_cpr);
        ph.filter_cto = hdr.b_cto8;

        if (sz_unc == 0) { // must never happen while 0!=wanted
            throwCantUnpack("corrupt b_info");
            break;
        }
        if (sz_unc <= 0 || sz_cpr <= 0)
            throwCantUnpack("corrupt b_info");
        if (sz_cpr > sz_unc || sz_unc > (int)blocksize)
            throwCantUnpack("corrupt b_info");

        // Read compressed data into the tail of ibuf so in-place
        // decompression toward ibuf[0] cannot overtake the source.
        int j = blocksize + OVERHEAD - sz_cpr;
        fi->readx(ibuf+j, sz_cpr);
        // update checksum of compressed data
        c_adler = upx_adler32(ibuf + j, sz_cpr, c_adler);
        // decompress
        if (sz_cpr < sz_unc) {
            decompress(ibuf+j, ibuf, false);
            if (12==szb_info) { // modern per-block filter
                if (hdr.b_ftid) {
                    Filter ft(ph.level);  // FIXME: ph.level for b_info?
                    ft.init(hdr.b_ftid, 0);
                    ft.cto = hdr.b_cto8;
                    ft.unfilter(ibuf, sz_unc);
                }
            }
            else { // ancient per-file filter
                if (first_PF_X) { // Elf32_Ehdr is never filtered
                    first_PF_X = false;  // but everything else might be
                }
                else if (ph.filter) {
                    Filter ft(ph.level);
                    ft.init(ph.filter, 0);
                    ft.cto = (unsigned char) ph.filter_cto;
                    ft.unfilter(ibuf, sz_unc);
                }
            }
            j = 0;   // result now starts at ibuf[0]
        }
        // update checksum of uncompressed data
        u_adler = upx_adler32(ibuf + j, sz_unc, u_adler);
        total_in += sz_cpr;
        total_out += sz_unc;
        // write block
        if (fo)
            fo->write(ibuf + j, sz_unc);
        // A block may not overshoot the extent.
        if (wanted < (unsigned)sz_unc)
            throwCantUnpack("corrupt b_info");
        wanted -= sz_unc;
    }
}
// NOTE(review): this definition continues beyond the visible chunk; only
// the portion shown here is documented. Code is unchanged.
/*
 * Locate the ARM head.S preamble in the kernel image by scanning for the
 * self-referential LC0 literal pool word, recover the decompress_kernel
 * call sites, relocate the GlobalOffsetTable past the setup code, and
 * retarget the two "bl decompress_kernel" branches accordingly.
 */
int PackVmlinuzARMEL::decompressKernel()
{
    // read whole kernel image
    obuf.alloc(file_size);
    fi->seek(0, SEEK_SET);
    fi->readx(obuf, file_size);

    //checkAlreadyPacked(obuf + setup_size, UPX_MIN(file_size - setup_size, (off_t)1024));

    // Find head.S:
    // bl decompress_kernel  # 0xeb......
    // b call_kernel  # 0xea......
    //LC0: .word LC0  # self!
    unsigned decompress_kernel = 0;
    unsigned caller1 = 0;
    unsigned caller2 = 0;
    unsigned got_start = 0;
    unsigned got_end = 0;
    for (unsigned j = 0; j < 0x400; j+=4) {
        unsigned w;
        // LC0 contains its own address (position-dependent literal).
        if (j!=get_te32(j + obuf)) {
            continue;
        }
        // The two words before LC0 must be "b" (0xea) preceded by "bl" (0xeb).
        if (0xea000000!=(0xff000000& get_te32(j - 4 + obuf))
        ||  0xeb000000!=(0xff000000&(w= get_te32(j - 8 + obuf))) ) {
            continue;
        }
        caller1 = j - 8;
        // Decode the bl target: 24-bit signed word offset, pc+8 relative.
        decompress_kernel = ((0x00ffffff & w)<<2) + 8+ caller1;
        // Search backwards for a second bl to the same target.
        for (unsigned k = 12; k<=128; k+=4) {
            w = get_te32(j - k + obuf);
            if (0xeb000000==(0xff000000 & w)
            &&  decompress_kernel==(((0x00ffffff & w)<<2) + 8+ j - k) ) {
                caller2 = j - k;
                break;
            }
        }
        // GOT bounds are stored in the literal pool after LC0.
        got_start = get_te32(5*4 + j + obuf);
        got_end = get_te32(6*4 + j + obuf);
#if 0 /*{*/
        printf("decompress_kernel=0x%x got_start=0x%x got_end=0x%x\n",
            decompress_kernel, got_start, got_end);
#endif /*}*/
        break;
    }
    if (0==decompress_kernel) {
        return 0;
    }

    // Find first subroutine that is called by decompress_kernel,
    // which we will consider to be the start of the gunzip module
    // and the end of the non-gunzip modules.
    for (unsigned j = decompress_kernel; j < (unsigned)file_size; j+=4) {
        unsigned w = get_te32(j + obuf);
        // Backward bl (negative 24-bit displacement).
        if (0xeb800000==(0xff800000 & w)) {
            setup_size = 8+ ((0xff000000 | w)<<2) + j;
            // Move the GlobalOffsetTable.
            for (unsigned k = got_start; k < got_end; k+=4) {
                w = get_te32(k + obuf);
                // FIXME: must relocate w
                set_te32(k - got_start + setup_size + obuf, w);
            }
            setup_size += got_end - got_start;
            // Re-aim both callers at the relocated decompress_kernel.
            set_te32(&obuf[caller1], 0xeb000000 |
                (0x00ffffff & ((setup_size - (8+ caller1))>>2)) );
            set_te32(&obuf[caller2], 0xeb000000 |
                (0x00ffffff & ((setup_size - (8+ caller2))>>2)) );
            break;
        }
    }
// read full kernel into obuf[], gzip-decompress into ibuf[],
// return decompressed size
/*
 * Steps:
 *  1. Scan the boot stub (head.S) machine code for the patterns that
 *     reveal physical_start, kernel alignment, page_offset, and whether
 *     the kernel is relocatable.
 *  2. Find the embedded gzip stream (payload_offset for header version
 *     >= 0x208, else byte-scan for the 1F 8B 08 magic).
 *  3. gzread it into ibuf, growing the buffer until it fits.
 *  4. For version >= 0x208 ELF payloads, flatten adjacent PT_LOADs into
 *     a raw .bin image at physical_start.
 *  5. Sanity-check the decompressed kernel's entry code unless -f.
 * Returns the decompressed size, or 0 on failure; throws CantPack on
 * unrecognized/unsupported kernels.
 */
int PackVmlinuzI386::decompressKernel()
{
    // read whole kernel image
    obuf.alloc(file_size);
    fi->seek(0, SEEK_SET);
    fi->readx(obuf, file_size);

    {
    const upx_byte *base = NULL;
    unsigned relocated = 0;

    // See startup_32: in linux/arch/i386/boot/compressed/head.S
    const upx_byte *p = &obuf[setup_size];
    unsigned cpa_0 = 0;   // candidate physical alignment
    unsigned cpa_1 = 0;   // its mask (0 - cpa_0 when cpa_0 is a power of 2)
    int j;
    if (0x205<=h.version) {
        // Header version >= 2.05 states the alignment explicitly.
        cpa_0 = h.kernel_alignment;
        cpa_1 = 0u - cpa_0;
    } else
    // Otherwise recover it from the align-%ebx instruction sequence.
    for ((p = &obuf[setup_size]), (j= 0); j < 0x200; ++j, ++p) {
        if (0==memcmp("\x89\xeb\x81\xc3", p, 4)
        &&  0==memcmp("\x81\xe3", 8+ p, 2)) {
            // movl %ebp,%ebx
            // addl $imm.w,%ebx
            // andl $imm.w,%ebx
            cpa_0 = 1+ get_te32( 4+ p);
            cpa_1 = get_te32(10+ p);
            break;
        }
    }
    for ((p = &obuf[setup_size]), (j= 0); j < 0x200; ++j, ++p) {
        if (0==memcmp("\x8d\x83", p, 2) // leal d32(%ebx),%eax
        &&  0==memcmp("\xff\xe0", 6+ p, 2) // jmp *%eax
        ) {
            relocated = get_te32(2+ p);
        }
        if (0==memcmp("\xE8\x00\x00\x00\x00\x5D", p, 6)) {
            // "call 1f; 1f: pop %ebp" determines actual execution address.
            // linux-2.6.21 (spring 2007) and later; upx stub needs work
            // unless LOAD_PHYSICAL_ADDR is known.
            // Allowed code is: linux-2.6.23/arch/x86/head_32.S 2008-01-01
            //      call 1f
            // 1:   popl %ebp
            //      subl $1b, %ebp  # 32-bit immediate
            //      movl $LOAD_PHYSICAL_ADDR, %ebx
            //
            if (0==memcmp("\x81\xed", 6+ p, 2) // subl $imm.w,%ebp
            &&  0==memcmp("\xbb", 12+ p, 1) ) { // movl $imm.w,%ebx
                physical_start = get_te32(13+ p);
            } else
            if (0==memcmp("\x81\xed", 6+ p, 2) // subl $imm.w,%ebp
            &&  is_pow2(cpa_0) && (0u-cpa_0)==cpa_1) {
                // Relocatable kernel: note where it will run from.
                base = (5+ p) - get_te32(8+ p);
                config_physical_align = cpa_0;
            }
            else {
                throwCantPack("Unrecognized relocatable kernel");
            }
        }
        // Find "ljmp $__BOOT_CS,$__PHYSICAL_START" if any.
        if (0==memcmp("\xEA\x00\x00", p, 3) && 0==(0xf & p[3]) && 0==p[4]) {
            /* whole megabyte < 16 MiB */
            physical_start = get_te32(1+ p);
            break;
        }
    }
    if (base && relocated) {
        // Continue the scan at the relocated code to refine
        // page_offset and physical_start.
        p = base + relocated;
        for (j = 0; j < 0x200; ++j, ++p) {
            if (0==memcmp("\x01\x9c\x0b", p, 3) // addl %ebx,d32(%ebx,%ecx)
            ) {
                page_offset = 0u - get_te32(3+ p);
            }
            if (0==memcmp("\x89\xeb", p, 2) // movl %ebp,%ebx
            &&  0==memcmp("\x81\xeb", 2+ p, 2) // subl $imm32,%ebx
            ) {
                physical_start = get_te32(4+ p);
            }
        }
    }
    }

    checkAlreadyPacked(obuf + setup_size, UPX_MIN(file_size - setup_size, (off_t)1024));

    int gzoff = setup_size;
    if (0x208<=h.version) {
        gzoff += h.payload_offset;
    }
    for (; gzoff < file_size; gzoff++) {
        // find gzip header (2 bytes magic + 1 byte method "deflated")
        int off = find(obuf + gzoff, file_size - gzoff, "\x1F\x8B\x08", 3);
        if (off < 0)
            break;
        gzoff += off;
        const int gzlen = (h.version < 0x208) ? (file_size - gzoff) : h.payload_length;
        if (gzlen < 256)
            break;
        // check gzip flag byte
        unsigned char flags = obuf[gzoff + 3];
        if ((flags & 0xe0) != 0)        // reserved bits set
            continue;
        //printf("found gzip header at offset %d\n", gzoff);

        // try to decompress
        int klen;
        int fd;
        off_t fd_pos;
        // Retry loop: grow ibuf by 1.5x until the whole stream fits.
        for (;;) {
            klen = -1;
            fd = -1;
            fd_pos = -1;
            // open (dup so gzclose does not close fi's descriptor)
            fi->seek(gzoff, SEEK_SET);
            fd = dup(fi->getFd());
            if (fd < 0)
                break;
            gzFile zf = gzdopen(fd, "rb");
            if (zf == NULL)
                break;
            // estimate gzip-decompressed kernel size & alloc buffer
            if (ibuf.getSize() == 0)
                ibuf.alloc(gzlen * 3);
            // decompress
            klen = gzread(zf, ibuf, ibuf.getSize());
            fd_pos = lseek(fd, 0, SEEK_CUR);
            gzclose(zf);
            fd = -1;
            if (klen != (int)ibuf.getSize())
                break;      // stream ended before the buffer filled: done
            // realloc and try again
            unsigned s = ibuf.getSize();
            ibuf.dealloc();
            ibuf.alloc(3 * s / 2);
        }
        if (fd >= 0)
            (void) close(fd);
        if (klen <= 0)
            continue;       // not a valid stream; keep scanning

        if (klen <= gzlen)
            continue;       // decompressed no larger than input: implausible

        if (0x208<=h.version && 0==memcmp("\177ELF", ibuf, 4)) {
            // Full ELF in theory; for now, try to handle as .bin at physical_start.
            // Check for PT_LOAD.p_paddr being ascending and adjacent.
            Elf_LE32_Ehdr const *const ehdr = (Elf_LE32_Ehdr const *)(void const *)ibuf;
            Elf_LE32_Phdr const *phdr = (Elf_LE32_Phdr const *)(ehdr->e_phoff + (char const *)ehdr);
            Elf_LE32_Shdr const *shdr = (Elf_LE32_Shdr const *)(ehdr->e_shoff + (char const *)ehdr);
            unsigned hi_paddr = 0, lo_paddr = 0;
            unsigned delta_off = 0;
            for (unsigned j=0; j < ehdr->e_phnum; ++j, ++phdr) {
                if (phdr->PT_LOAD==phdr->p_type) {
                    // Next expected p_paddr after aligning the previous end.
                    unsigned step = (hi_paddr + phdr->p_align - 1) & ~(phdr->p_align - 1);
                    if (0==hi_paddr) { // first PT_LOAD
                        if (physical_start!=phdr->p_paddr) {
                            return 0;
                        }
                        delta_off = phdr->p_paddr - phdr->p_offset;
                        lo_paddr = phdr->p_paddr;
                        hi_paddr = phdr->p_filesz + phdr->p_paddr;
                    }
                    else if (step==phdr->p_paddr
                    &&  delta_off==(phdr->p_paddr - phdr->p_offset)) {
                        hi_paddr = phdr->p_filesz + phdr->p_paddr;
                    }
                    else {
                        return 0; // Not equivalent to a .bin. Too complex for now.
                    }
                }
            }
            // FIXME: ascending order is only a convention; might need sorting.
            // NOTE(review): this loop tests shdr->sh_type (element 0) but
            // never advances shdr, while indexing shdr[j] for flags/size —
            // looks suspicious; confirm intent against upstream.
            for (unsigned j=1; j < ehdr->e_shnum; ++j) {
                if (shdr->SHT_PROGBITS==shdr->sh_type) { // SHT_REL might be intermixed
                    if (shdr->SHF_EXECINSTR & shdr[j].sh_flags) {
                        filter_len += shdr[j].sh_size; // FIXME: include sh_addralign
                    }
                    else {
                        break;
                    }
                }
            }
            // Flatten the file-adjacent PT_LOADs into a raw image.
            memmove(ibuf, (lo_paddr - delta_off) + ibuf, hi_paddr - lo_paddr);
            // FIXME: set_size
            // FIXME: .bss ? Apparently handled by head.S
        }
        if (opt->force > 0)
            return klen;

        // some checks
        if (fd_pos != file_size) {
            //printf("fd_pos: %ld, file_size: %ld\n", (long)fd_pos, (long)file_size);
            // linux-2.6.21.5/arch/i386/boot/compressed/vmlinux.lds
            // puts .data.compressed ahead of .text, .rodata, etc;
            // so piggy.o need not be last in bzImage. Alas.
            //throwCantPack("trailing bytes after kernel image; use option '-f' to force packing");
        }

        // see /usr/src/linux/arch/i386/kernel/head.S
        // 2.4.x: [cli;] cld; mov $...,%eax
        if (memcmp(ibuf, "\xFC\xB8", 2) == 0) goto head_ok;
        if (memcmp(ibuf, "\xFA\xFC\xB8", 3) == 0) goto head_ok;
        // 2.6.21.5 CONFIG_PARAVIRT mov %cs,%eax; test $3,%eax; jne ...;
        if (memcmp(ibuf, "\x8c\xc8\xa9\x03\x00\x00\x00\x0f\x85", 9) == 0) goto head_ok;
        if (memcmp(ibuf, "\x8c\xc8\xa8\x03\x0f\x85", 6) == 0) goto head_ok;
        // 2.6.x: [cli;] cld; lgdt ...
        if (memcmp(ibuf, "\xFC\x0F\x01", 3) == 0) goto head_ok;
        if (memcmp(ibuf, "\xFA\xFC\x0F\x01", 4) == 0) goto head_ok;
        // 2.6.x+grsecurity+strongswan+openwall+trustix: ljmp $0x10,...
        if (ibuf[0] == 0xEA && memcmp(ibuf+5, "\x10\x00", 2) == 0) goto head_ok;
        // x86_64 2.6.x
        if (0xB8==ibuf[0] // mov $...,%eax
        &&  0x8E==ibuf[5] && 0xD8==ibuf[6] // mov %eax,%ds
        &&  0x0F==ibuf[7] && 0x01==ibuf[8] && 020==(070 & ibuf[9]) // lgdtl
        &&  0xB8==ibuf[14] // mov $...,%eax
        &&  0x0F==ibuf[19] && 0xA2==ibuf[20] // cpuid
        ) goto head_ok;
        // cmpw $0x207,0x206(%esi) Debian vmlinuz-2.6.24-12-generic
        if (0==memcmp("\x66\x81\xbe\x06\x02\x00\x00\x07\x02", ibuf, 9)) goto head_ok;
        // testb $0x40,0x211(%esi) Fedora vmlinuz-2.6.25-0.218.rc8.git7.fc9.i686
        if (0==memcmp("\xf6\x86\x11\x02\x00\x00\x40", ibuf, 7)) goto head_ok;
        // rex.W prefix for x86_64
        if (0x48==ibuf[0])
            throwCantPack("x86_64 bzImage is not yet supported");
        throwCantPack("unrecognized kernel architecture; use option '-f' to force packing");
    head_ok:

        // FIXME: more checks for special magic bytes in ibuf ???
        // FIXME: more checks for kernel architecture ???

        return klen;
    }
    return 0;
}