// Rewrite the LE entry table in place so that every 32-bit entry bundle
// points at object 1 and carries absolute (base-relative) addresses.
// Consumes the `ientries` buffer: on return it has been handed over to
// `oentries` and `ientries` is NULL.  Throws via throwCantPack() on any
// bundle type other than 0 (unused) or 3 (32-bit).
void PackWcle::encodeEntryTable()
{
    unsigned count,object,n;
    upx_byte *p = ientries;
    n = 0;
    // The table is a sequence of bundles; a leading zero count byte ends it.
    while (*p)
    {
        count = *p;                 // number of entries in this bundle
        n += count;
        if (p[1] == 0)              // unused bundle
            p += 2;
        else if (p[1] == 3)         // 32-bit bundle
        {
            // Object numbers are 1-based in the file; remember the original
            // object, then force the bundle to reference object 1.
            object = get_le16(p+2)-1;
            set_le16(p+2,1);
            p += 4;
            // Each 32-bit entry is 5 bytes: flags byte + LE32 offset.
            // Rebase the offset by the owning object's base address.
            for (; count; count--, p += 5)
                set_le32(p+1,IOT(object,my_base_address) + get_le32(p+1));
        }
        else
            throwCantPack("unsupported bundle type in entry table");
    }
    //if (Opt_debug) printf("%d entries encoded.\n",n);
    UNUSED(n);
    // Include the terminating zero byte in the output size.
    soentries = ptr_diff(p, ientries) + 1;
    // Transfer ownership of the (now rewritten) buffer to the output side.
    oentries = ientries;
    ientries = NULL;
}
// Decompress a packed TMT executable back to its original form.
// NOTE(review): this definition appears truncated in this chunk — the
// function body continues past the visible text (no closing brace here).
void PackTmt::unpack(OutputFile *fo)
{
    Packer::handleStub(fi,fo,adam_offset);

    ibuf.alloc(ph.c_len);
    obuf.allocForUncompression(ph.u_len);

    // Seek past the stub and pack header to the compressed payload.
    fi->seek(adam_offset + ph.buf_offset + ph.getPackHeaderSize(),SEEK_SET);
    fi->readx(ibuf,ph.c_len);

    // decompress
    decompress(ibuf,obuf);

    // decode relocations
    // The last LE32 of the uncompressed data holds the relocation block
    // size; the one before it holds the original entry point offset.
    const unsigned osize = ph.u_len - get_le32(obuf+ph.u_len-4);
    upx_byte *relocs = obuf + osize;
    const unsigned origstart = get_le32(obuf+ph.u_len-8);

    // unfilter
    if (ph.filter)
    {
        Filter ft(ph.level);
        ft.init(ph.filter, 0);
        ft.cto = (unsigned char) ph.filter_cto;
        // Old headers (< v11) stored the filter cto inside the trailer
        // word instead of in the pack header.
        if (ph.version < 11)
            ft.cto = (unsigned char) (get_le32(obuf+ph.u_len-12) >> 24);
        ft.unfilter(obuf, ptr_diff(relocs, obuf));
    }
/**
 * Compute the mapping offset for the program / library.
 *
 * The .text section could say 0x500000 but the actual virtual memory
 * address where the library was mapped could be 0x600000.  Hence looking
 * for addresses at 0x6xxxxx would not create any match with the symbol
 * addresses held in the file.
 *
 * The base given here should be the actual VM address where the kernel
 * loaded the first section.
 *
 * The computed offset will then be automatically used to adjust the given
 * addresses being looked at, remapping them to the proper range for lookup
 * purposes.
 *
 * @param bc		the BFD context (NULL allowed for convenience)
 * @param base		the VM mapping address of the text segment
 */
void
bfd_util_compute_offset(bfd_ctx_t *bc, ulong base)
{
	asection *sec;
	bfd *b;

	if (NULL == bc)
		return;					/* Convenience */

	bfd_ctx_check(bc);

	/* Unlocked fast path: nothing to do once the offset is computed. */
	if (bc->offseted || NULL == bc->handle)
		return;

	mutex_lock_fast(&bc->lock);

	/* Re-check under the lock: another thread may have beaten us to it. */
	if (bc->offseted) {
		mutex_unlock_fast(&bc->lock);
		return;
	}

	b = bc->handle;
	if (NULL == b) {
		mutex_unlock_fast(&bc->lock);
		return;
	}

	/*
	 * Take the first section of the file and look where its page would start.
	 * Then compare that to the advertised mapping base for the object to
	 * know the offset we have to apply for proper symbol resolution.
	 */

	sec = b->sections;

	/*
	 * Notes for later: sections are linked through sec->next.
	 *
	 * It is possible to gather the section name via:
	 *		const char *name = bfd_section_name(b, sec);
	 */

	if (sec != NULL) {
		bfd_vma addr = bfd_section_vma(b, sec);
		/* Compare page-aligned file VMA against page-aligned mapping base. */
		bc->offset = ptr_diff(vmm_page_start(ulong_to_pointer(addr)),
			vmm_page_start(ulong_to_pointer(base)));
	}

	/* Mark done even when there were no sections: we only try once. */
	bc->offseted = TRUE;
	mutex_unlock_fast(&bc->lock);
}
/**
 * Add child to the node, carrying an IP:port.
 *
 * The payload is the packed address bytes (network byte stripped)
 * followed by the port in little-endian order.
 *
 * @param t		the tree node where child must be added
 * @param name	the name of the child
 * @param addr	the IP address
 * @param port	the port address
 *
 * @return the added child node
 */
static g2_tree_t *
g2_build_add_host(g2_tree_t *t, const char *name,
	host_addr_t addr, uint16 port)
{
	char buf[18];				/* Large enough for IPv6 as well, one day? */
	struct packed_host_addr pa = host_addr_pack(addr);
	uint addr_len = packed_host_addr_size(pa) - 1;	/* skip network byte */
	void *end;
	g2_tree_t *child;

	end = mempcpy(buf, &pa.addr, addr_len);
	end = poke_le16(end, port);

	child = g2_tree_alloc_copy(name, buf, ptr_diff(end, buf));
	g2_tree_add_child(t, child);

	return child;
}
/**
 * Serialization convenience for IP:port.
 *
 * Write the IP:port (IP as big-endian, port as little-endian) into the
 * supplied buffer, whose length MUST be 18 bytes at least.
 *
 * If len is non-NULL, it is written with the length of the serialized data.
 *
 * @return pointer following serialization data.
 */
void *
host_ip_port_poke(void *p, const host_addr_t addr, uint16 port, size_t *len)
{
	void *cursor = p;

	switch (host_addr_net(addr)) {
	case NET_TYPE_IPV4:
		cursor = poke_be32(cursor, host_addr_ipv4(addr));
		break;
	case NET_TYPE_IPV6:
		cursor = mempcpy(cursor, host_addr_ipv6(&addr), sizeof addr.addr.ipv6);
		break;
	case NET_TYPE_LOCAL:
	case NET_TYPE_NONE:
		g_assert_not_reached();		/* Only IPv4/IPv6 can be serialized */
	}

	cursor = poke_le16(cursor, port);

	if (len != NULL)
		*len = ptr_diff(cursor, p);

	return cursor;
}
// Return the non-negative difference p1 - p2 as an unsigned value.
// Asserts that p1 >= p2 and that the difference fits the return type.
unsigned ptr_udiff(const void *p1, const void *p2)
{
    // Use ptrdiff_t, not int: on LP64 platforms an int would silently
    // truncate a difference larger than 2 GiB *before* the sign check
    // below could catch anything.
    ptrdiff_t d = ptr_diff(p1, p2);
    assert(d >= 0);
    // The value must survive the conversion to unsigned unchanged.
    assert(d == (ptrdiff_t) (unsigned) d);
    return ACC_ICONV(unsigned, d);
}
// Rebuild the LE fixup (relocation) records for the output image from the
// packer's compact representation: first the optimized 32-bit offset
// relocations are expanded, then the selector-fixup stub code and the
// self-relative fixup list appended after them are decoded back into
// native LE fixup records (types 7, 2 and 8), page by page.
void PackWcle::decodeFixups()
{
    upx_byte *p = oimage + soimage;

    iimage.dealloc();

    MemBuffer tmpbuf;
    // Expand the optimized relocation stream into a flat LE32 offset list.
    unsigned fixupn = unoptimizeReloc32(&p,oimage,&tmpbuf,1);

    // wrkmem holds (offset, rebased-target) pairs, 8 bytes per fixup,
    // plus one sentinel slot.
    MemBuffer wrkmem(8*fixupn+8);
    unsigned ic,jc,o,r;
    for (ic=0; ic<fixupn; ic++)
    {
        jc=get_le32(tmpbuf+4*ic);
        set_le32(wrkmem+ic*8,jc);
        o = soobject_table;
        r = get_le32(oimage+jc);
        // Map the absolute address back to (object, offset).
        virt2rela(oobject_table,&o,&r);
        set_le32(wrkmem+ic*8+4,OOT(o-1,my_base_address));
        set_le32(oimage+jc,r);
    }
    set_le32(wrkmem+ic*8,0xFFFFFFFF);       // end of 32-bit offset fixups
    tmpbuf.dealloc();

    // selector fixups and self-relative fixups
    // The selector fixup stubs are 9 bytes each and terminated by a
    // 0xC3 (ret) byte; the self-relative fixup list follows it.
    const upx_byte *selector_fixups = p;
    const upx_byte *selfrel_fixups = p;
    while (*selfrel_fixups != 0xC3)
        selfrel_fixups += 9;
    selfrel_fixups++;
    unsigned selectlen = ptr_diff(selfrel_fixups, selector_fixups)/9;

    // Worst-case output: 9 bytes per 32-bit fixup, 5 per selector fixup.
    ofixups = New(upx_byte, fixupn*9+1000+selectlen*5);
    upx_bytep fp = ofixups;

    for (ic = 1, jc = 0; ic <= opages; ic++)
    {
        // self relative fixups (record type 8)
        while ((r = get_le32(selfrel_fixups))/mps == ic-1)
        {
            fp[0] = 8;
            set_le16(fp+2,r & (mps-1));
            // The stored dword encodes the relative displacement; restore
            // the absolute target and clear the slot in the image.
            o = 4+get_le32(oimage+r);
            set_le32(oimage+r,0);
            r += o;
            o = soobject_table;
            virt2rela(oobject_table,&o,&r);
            fp[4] = (unsigned char) o;
            set_le32(fp+5,r);
            // Flag 0x10 selects the 32-bit (9 byte) record form.
            fp[1] = (unsigned char) (r > 0xFFFF ? 0x10 : 0);
            fp += fp[1] ? 9 : 7;
            selfrel_fixups += 4;
            dputc('r',stdout);
        }
        // selector fixups (record type 2)
        while (selectlen && (r = get_le32(selector_fixups+5))/mps == ic-1)
        {
            fp[0] = 2;
            fp[1] = 0;
            set_le16(fp+2,r & (mps-1));
            // Stub opcode byte distinguishes ss-based from cs-based stubs.
            unsigned x = selector_fixups[1] > 0xD0 ? oh.init_ss_object : oh.init_cs_object;
            fp[4] = (unsigned char) x;
            fp += 5;
            selector_fixups += 9;
            selectlen--;
            dputc('s',stdout);
        }
        // 32 bit offset fixups (record type 7)
        while (get_le32(wrkmem+4*jc) < ic*mps)
        {
            if (jc > 1 && ((get_le32(wrkmem+4*(jc-2))+3) & (mps-1)) < 3) // cross page fixup?
            {
                // Re-emit the previous fixup with a negative page offset so
                // the loader applies it across the page boundary.
                r = get_le32(oimage+get_le32(wrkmem+4*(jc-2)));
                fp[0] = 7;
                fp[1] = (unsigned char) (r > 0xFFFF ? 0x10 : 0);
                set_le16(fp+2,get_le32(wrkmem+4*(jc-2)) | ~3);
                set_le32(fp+5,r);
                o = soobject_table;
                r = get_le32(wrkmem+4*(jc-1));
                virt2rela(oobject_table,&o,&r);
                fp[4] = (unsigned char) o;
                fp += fp[1] ? 9 : 7;
                dputc('0',stdout);
            }
            o = soobject_table;
            r = get_le32(wrkmem+4*(jc+1));
            virt2rela(oobject_table,&o,&r);
            r = get_le32(oimage+get_le32(wrkmem+4*jc));
            fp[0] = 7;
            fp[1] = (unsigned char) (r > 0xFFFF ? 0x10 : 0);
            set_le16(fp+2,get_le32(wrkmem+4*jc) & (mps-1));
            fp[4] = (unsigned char) o;
            set_le32(fp+5,r);
            fp += fp[1] ? 9 : 7;
            jc += 2;
        }
        // Record the end offset of this page's fixups in the page table.
        set_le32(ofpage_table+ic,ptr_diff(fp,ofixups));
    }
    // Pad with zero bytes as safety margin for the loader.
    for (ic=0; ic < FIXUP_EXTRA; ic++)
        *fp++ = 0;
    sofixups = ptr_diff(fp, ofixups);
}
// Walk the LE fixup records page by page, apply the 32-bit offset and
// self-relative fixups directly into the input image, and collect the
// relocation positions plus generated selector-fixup stub code into
// `ifixups` for the decompression stub.  Unsupported record types abort
// via throwCantPack().
void PackWcle::preprocessFixups()
{
    big_relocs = 0;

    unsigned ic,jc;
    // counts[0..objects-1]: fixups per object;
    // counts[objects], counts[objects+1]: selector / self-relative totals.
    Array(unsigned, counts, objects+2);
    countFixups(counts);

    for (ic = jc = 0; ic < objects; ic++)
        jc += counts[ic];

    if (jc == 0)
    {
        // FIXME: implement this
        throwCantPack("files without relocations are not supported");
    }

    // NOTE(review): rl receives 4 bytes per collected relocation
    // (set_le32(rl+4*rc++,...) below) — confirm `jc` bytes is really a
    // sufficient capacity here, as written it looks like 4*jc is needed.
    ByteArray(rl, jc);
    ByteArray(srf, counts[objects+0]+1);
    ByteArray(slf, counts[objects+1]+1);

    upx_byte *selector_fixups = srf;
    upx_byte *selfrel_fixups = slf;
    unsigned rc = 0;

    upx_byte *fix = ifixups;
    // jc now tracks the running page base address (page ic * mps).
    for (ic = jc = 0; ic < pages; ic++)
    {
        while ((unsigned)(fix - ifixups) < get_le32(ifpage_table+ic+1))
        {
            const int fixp2 = get_le16_signed(fix+2);
            unsigned value;

            switch (*fix)
            {
                case 2:     // selector fixup
                    if (fixp2 < 0)
                    {
                        // cross page selector fixup
                        dputc('S',stdout);
                        fix += 5;
                        break;
                    }
                    dputc('s',stdout);
                    // Emit a runtime stub that stores the live selector.
                    memcpy(selector_fixups,"\x8C\xCB\x66\x89\x9D",5); // mov bx, cs ; mov [xxx+ebp], bx
                    if (IOT(fix[4]-1,flags) & LEOF_WRITE)
                        selector_fixups[1] = 0xDB; // ds
                    set_le32(selector_fixups+5,jc+fixp2);
                    selector_fixups += 9;
                    fix += 5;
                    break;
                case 5:     // 16-bit offset
                    // Only tolerated when it targets the first page of its
                    // own object (rebasing cannot change the low 16 bits).
                    if ((unsigned)fixp2 < 4096 && IOT(fix[4]-1,my_base_address) == jc)
                        dputc('6',stdout);
                    else
                        throwCantPack("unsupported 16-bit offset relocation");
                    fix += (fix[1] & 0x10) ? 9 : 7;
                    break;
                case 6:     // 16:32 pointer
                    if (fixp2 < 0)
                    {
                        // cross page pointer fixup
                        dputc('P',stdout);
                        fix += (fix[1] & 0x10) ? 9 : 7;
                        break;
                    }
                    dputc('p',stdout);
                    // Patch the 32-bit offset part and record it as a
                    // normal relocation ...
                    memcpy(iimage+jc+fixp2,fix+5,(fix[1] & 0x10) ? 4 : 2);
                    set_le32(rl+4*rc++,jc+fixp2);
                    set_le32(iimage+jc+fixp2,get_le32(iimage+jc+fixp2)+IOT(fix[4]-1,my_base_address));
                    // ... and emit a stub for the 16-bit selector part.
                    memcpy(selector_fixups,"\x8C\xCA\x66\x89\x95",5);
                    if (IOT(fix[4]-1,flags) & LEOF_WRITE)
                        selector_fixups[1] = 0xDA; // ds
                    set_le32(selector_fixups+5,jc+fixp2+4);
                    selector_fixups += 9;
                    fix += (fix[1] & 0x10) ? 9 : 7;
                    break;
                case 7:     // 32-bit offset
                    if (fixp2 < 0)
                    {
                        fix += (fix[1] & 0x10) ? 9 : 7;
                        break;
                    }
                    //if (memcmp(iimage+jc+fixp2,fix+5,(fix[1] & 0x10) ? 4 : 2))
                    //    throwCantPack("illegal fixup offset");
                    // work around a pmwunlite bug: remove duplicated fixups
                    // FIXME: fix the other cases too
                    if (rc == 0 || get_le32(rl+4*rc-4) != jc+fixp2)
                    {
                        set_le32(rl+4*rc++,jc+fixp2);
                        set_le32(iimage+jc+fixp2,get_le32(iimage+jc+fixp2)+IOT(fix[4]-1,my_base_address));
                    }
                    fix += (fix[1] & 0x10) ? 9 : 7;
                    break;
                case 8:     // 32-bit self relative fixup
                    if (fixp2 < 0)
                    {
                        // cross page self relative fixup
                        dputc('R',stdout);
                        fix += (fix[1] & 0x10) ? 9 : 7;
                        break;
                    }
                    value = get_le32(fix+5);
                    // Short record form carries only a 16-bit target.
                    if (fix[1] == 0)
                        value &= 0xffff;
                    // Store the resolved displacement relative to the end
                    // of the 4-byte field, and remember the position.
                    set_le32(iimage+jc+fixp2,(value+IOT(fix[4]-1,my_base_address))-jc-fixp2-4);
                    set_le32(selfrel_fixups,jc+fixp2);
                    selfrel_fixups += 4;
                    dputc('r',stdout);
                    fix += (fix[1] & 0x10) ? 9 : 7;
                    break;
                default:
                    throwCantPack("unsupported fixup record");
            }
        }
        jc += mps;
    }

    // resize ifixups if it's too small
    if (sofixups < 1000)
    {
        delete[] ifixups;
        ifixups = new upx_byte[1000];
    }
    // Compress the collected relocation list in place into ifixups.
    fix = optimizeReloc32 (rl,rc,ifixups,iimage,1,&big_relocs);

    has_extra_code = srf != selector_fixups;
    // FIXME: this could be removed if has_extra_code = false
    // but then we'll need a flag
    *selector_fixups++ = 0xC3; // ret
    memcpy(fix,srf,selector_fixups-srf); // copy selector fixup code
    fix += selector_fixups-srf;

    memcpy(fix,slf,selfrel_fixups-slf);  // copy self-relative fixup positions
    fix += selfrel_fixups-slf;
    // Terminate the self-relative list with an all-ones sentinel.
    set_le32(fix,0xFFFFFFFFUL);
    fix += 4;

    sofixups = ptr_diff(fix, ifixups);
}
/**
 * Incrementally process more data.
 *
 * @param zs		the zlib stream object
 * @param amount	amount of data to process
 * @param maxout	maximum length of dynamically-allocated buffer (0 = none)
 * @param may_close	whether to allow closing when all data was consumed
 * @param finish	whether this is the last data to process
 *
 * @return -1 on error, 1 if work remains, 0 when done.
 */
static int
zlib_stream_process_step(zlib_stream_t *zs, int amount, size_t maxout,
	bool may_close, bool finish)
{
	z_streamp z;
	int remaining;
	int process;
	bool finishing;
	int ret = 0;

	g_assert(amount > 0);
	g_assert(!zs->closed);

	z = zs->z;
	g_assert(z != NULL);		/* Stream not closed yet */

	/*
	 * Compute amount of input data to process.
	 */

	remaining = zs->inlen - ptr_diff(z->next_in, zs->in);
	g_assert(remaining >= 0);

	process = MIN(remaining, amount);
	finishing = process == remaining;

	/*
	 * Process data.
	 */

	z->avail_in = process;

resume:
	switch (zs->magic) {
	case ZLIB_DEFLATER_MAGIC:
		/* Only request Z_FINISH on the very last chunk of the last call */
		ret = deflate(z, finishing && finish ? Z_FINISH : 0);
		break;
	case ZLIB_INFLATER_MAGIC:
		ret = inflate(z, Z_SYNC_FLUSH);
		break;
	}

	switch (ret) {
	case Z_OK:
		if (0 == z->avail_out) {
			if (zlib_stream_grow_output(zs, maxout))
				goto resume;	/* Process remaining input */
			goto error;			/* Cannot continue */
		}
		return 1;				/* Need to call us again */
		/* NOTREACHED */
	case Z_BUF_ERROR:	/* Output full or need more input to continue */
		if (0 == z->avail_out) {
			if (zlib_stream_grow_output(zs, maxout))
				goto resume;	/* Process remaining input */
			goto error;			/* Cannot continue */
		}
		if (0 == z->avail_in)
			return 1;			/* Need to call us again */
		goto error;				/* Cannot continue */
		/* NOTREACHED */
	case Z_STREAM_END:	/* Reached end of input stream */
		g_assert(finishing);

		/*
		 * Supersede the output length to let them probe how much data
		 * was processed once the stream is closed, through calls to
		 * zlib_deflater_outlen() or zlib_inflater_outlen().
		 */

		zs->outlen = ptr_diff(z->next_out, zs->out);
		g_assert(zs->outlen > 0);

		if (may_close) {
			switch (zs->magic) {
			case ZLIB_DEFLATER_MAGIC:
				ret = deflateEnd(z);
				break;
			case ZLIB_INFLATER_MAGIC:
				ret = inflateEnd(z);
				break;
			}
			if (ret != Z_OK) {
				g_carp("%s(): while freeing zstream: %s",
					G_STRFUNC, zlib_strerror(ret));
			}
			WFREE(z);
			zs->z = NULL;
		}
		zs->closed = TRUE;		/* Signals processing stream done */
		return 0;				/* Done */
		/* NOTREACHED */
	default:
		break;
	}

	/* FALL THROUGH */

error:
	g_carp("%s(): error during %scompression: %s "
		"(avail_in=%u, avail_out=%u, total_in=%lu, total_out=%lu)",
		G_STRFUNC, ZLIB_DEFLATER_MAGIC == zs->magic ? "" : "de",
		zlib_strerror(ret),
		z->avail_in, z->avail_out, z->total_in, z->total_out);

	if (may_close) {
		switch (zs->magic) {
		case ZLIB_DEFLATER_MAGIC:
			ret = deflateEnd(z);
			break;		/* BUGFIX: was falling through into inflateEnd() */
		case ZLIB_INFLATER_MAGIC:
			ret = inflateEnd(z);
			break;
		}
		if (ret != Z_OK && ret != Z_DATA_ERROR) {
			g_carp("%s(): while freeing stream: %s",
				G_STRFUNC, zlib_strerror(ret));
		}
		WFREE(z);
		zs->z = NULL;
	}

	return -1;		/* Error! */
}
/**
 * Called from gwc_parse_dispatch_lines() for each complete line of output.
 *
 * Recognized line shapes (letters case-insensitive):
 *   "ERROR..."  -> log, cancel the HTTP request, stop processing;
 *   "H|ip:port" -> add host to the G2 hub cache;
 *   "U|url|..." -> add another GWC URL;
 *   "I|..."     -> informational, ignored.
 * Everything else is logged (at sufficient debug level) and skipped.
 *
 * @return FALSE to stop processing of any remaining data.
 */
static bool
gwc_host_line(struct gwc_parse_context *ctx, const char *buf, size_t len)
{
	int c;

	if (GNET_PROPERTY(bootstrap_debug) > 3)
		g_message("BOOT GWC host line (%lu bytes): %s", (ulong) len, buf);

	if (is_strprefix(buf, "ERROR")) {
		g_warning("GWC cache \"%s\" returned %s",
			http_async_url(ctx->handle), buf);
		http_async_cancel(ctx->handle);
		return FALSE;
	}

	if (len <= 2)
		return TRUE;		/* Skip this line silently */

	/*
	 * A line starting with "H|" is a host, with "U|" a GWC URL.
	 * Letters are case-insensitive.
	 */

	if (buf[1] != '|')
		goto malformed;

	c = ascii_toupper(buf[0]);

	if ('H' == c) {
		host_addr_t addr;
		uint16 port;

		/* Unparsable addresses are silently skipped, not errors. */
		if (string_to_host_addr_port(&buf[2], NULL, &addr, &port)) {
			ctx->processed++;
			hcache_add_caught(HOST_G2HUB, addr, port, "GWC");
			if (GNET_PROPERTY(bootstrap_debug) > 1) {
				g_message("BOOT (G2) collected %s from GWC %s",
					host_addr_port_to_string(addr, port),
					http_async_url(ctx->handle));
			}
		}
		return TRUE;
	} else if ('U' == c) {
		char *end = strchr(&buf[2], '|');
		char *url;

		if (NULL == end)
			goto malformed;

		ctx->processed++;
		/* The URL is everything between the two '|' separators. */
		url = h_strndup(&buf[2], ptr_diff(end, &buf[2]));
		gwc_add(url);
		hfree(url);
		return TRUE;
	} else if ('I' == c) {
		return TRUE;		/* Ignore information line */
	}

	/*
	 * If we come here, we did not recognize the line properly.
	 */

	if (GNET_PROPERTY(bootstrap_debug) > 2) {
		g_warning("GWC ignoring unknown line \"%s\" from %s",
			buf, http_async_url(ctx->handle));
	}

	return TRUE;

malformed:
	if (GNET_PROPERTY(bootstrap_debug)) {
		g_warning("GWC ignoring malformed line \"%s\" from %s",
			buf, http_async_url(ctx->handle));
	}
	return TRUE;
}
// Pack an ARM/WinCE PE executable: validate the header, load and merge
// all sections into one image, preprocess imports/TLS/resources/exports/
// relocations, compress, then emit a new 3-4 section PE (UPX0/UPX1/UPX2
// [+ .rsrc]) with the decompression stub.  Throws on anything it cannot
// safely handle.
void PackArmPe::pack(OutputFile *fo)
{
    // FIXME: we need to think about better support for --exact
    if (opt->exact)
        throwCantPackExact();

    const unsigned objs = ih.objects;
    isection = new pe_section_t[objs];
    fi->seek(pe_offset+sizeof(ih),SEEK_SET);
    fi->readx(isection,sizeof(pe_section_t)*objs);
    rvamin = isection[0].vaddr;

    infoHeader("[Processing %s, format %s, %d sections]", fn_basename(fi->getName()), getName(), objs);

    // check the PE header
    // FIXME: add more checks
    if (!opt->force && (
           (ih.cpu != 0x1c0 && ih.cpu != 0x1c2)     // ARM / ARM Thumb
        || (ih.opthdrsize != 0xe0)
        || ((ih.flags & EXECUTABLE) == 0)
        || (ih.subsystem != 9)                      // windows ce gui
        || (ih.entry == 0 /*&& !isdll*/)
        || (ih.ddirsentries != 16)
//        || IDSIZE(PEDIR_EXCEPTION) // is this used on arm?
//        || IDSIZE(PEDIR_COPYRIGHT)
       ))
        throwCantPack("unexpected value in PE header (try --force)");

    if (IDSIZE(PEDIR_SEC))
        IDSIZE(PEDIR_SEC) = IDADDR(PEDIR_SEC) = 0;
    //    throwCantPack("compressing certificate info is not supported");

    if (IDSIZE(PEDIR_COMRT))
        throwCantPack(".NET files (win32/net) are not yet supported");

    // DLLs must keep their relocations; for EXEs the default depends on
    // whether the image base leaves room for relocation stripping.
    if (isdll)
        opt->win32_pe.strip_relocs = false;
    else if (opt->win32_pe.strip_relocs < 0)
        opt->win32_pe.strip_relocs = (ih.imagebase >= 0x10000);
    if (opt->win32_pe.strip_relocs)
    {
        if (ih.imagebase < 0x10000)
            throwCantPack("--strip-relocs is not allowed when imagebase < 0x10000");
        else
            ih.flags |= RELOCS_STRIPPED;
    }

    if (memcmp(isection[0].name,"UPX",3) == 0)
        throwAlreadyPackedByUPX();
    if (!opt->force && IDSIZE(15))
        throwCantPack("file is possibly packed/protected (try --force)");
    if (ih.entry && ih.entry < rvamin)
        throwCantPack("run a virus scanner on this file!");
    if (!opt->force && ih.subsystem == 1)
        throwCantPack("subsystem 'native' is not supported (try --force)");
    if (ih.filealign < 0x200)
        throwCantPack("filealign < 0x200 is not yet supported");

    handleStub(fi,fo,pe_offset);

    // Allocate the working image: virtual size plus room for the
    // preprocessed import/relocation data appended behind it.
    const unsigned usize = ih.imagesize;
    const unsigned xtrasize = UPX_MAX(ih.datasize, 65536u) + IDSIZE(PEDIR_IMPORT) + IDSIZE(PEDIR_BOUNDIM) + IDSIZE(PEDIR_IAT) + IDSIZE(PEDIR_DELAYIMP) + IDSIZE(PEDIR_RELOC);
    ibuf.alloc(usize + xtrasize);

    // BOUND IMPORT support. FIXME: is this ok?
    fi->seek(0,SEEK_SET);
    fi->readx(ibuf,isection[0].rawdataptr);

    Interval holes(ibuf);

    unsigned ic,jc,overlaystart = 0;
    ibuf.clear(0, usize);
    // Load every section at its virtual address; uninitialized/info
    // sections become "holes".
    for (ic = jc = 0; ic < objs; ic++)
    {
        if (isection[ic].rawdataptr && overlaystart < isection[ic].rawdataptr + isection[ic].size)
            overlaystart = ALIGN_UP(isection[ic].rawdataptr + isection[ic].size,ih.filealign);
        if (isection[ic].vsize == 0)
            isection[ic].vsize = isection[ic].size;
        if ((isection[ic].flags & PEFL_BSS) || isection[ic].rawdataptr == 0
            || (isection[ic].flags & PEFL_INFO))
        {
            holes.add(isection[ic].vaddr,isection[ic].vsize);
            continue;
        }
        if (isection[ic].vaddr + isection[ic].size > usize)
            throwCantPack("section size problem");
        if (((isection[ic].flags & (PEFL_WRITE|PEFL_SHARED)) == (PEFL_WRITE|PEFL_SHARED)))
            if (!opt->force)
                throwCantPack("writable shared sections not supported (try --force)");
        if (jc && isection[ic].rawdataptr - jc > ih.filealign)
            throwCantPack("superfluous data between sections");
        fi->seek(isection[ic].rawdataptr,SEEK_SET);
        jc = isection[ic].size;
        if (jc > isection[ic].vsize)
            jc = isection[ic].vsize;
        if (isection[ic].vsize == 0) // hack for some tricky programs - may this break other progs?
            jc = isection[ic].vsize = isection[ic].size;
        if (isection[ic].vaddr + jc > ibuf.getSize())
            throwInternalError("buffer too small 1");
        fi->readx(ibuf + isection[ic].vaddr,jc);
        jc += isection[ic].rawdataptr;
    }

    // check for NeoLite
    if (find(ibuf + ih.entry, 64+7, "NeoLite", 7) >= 0)
        throwCantPack("file is already compressed with another packer");

    unsigned overlay = file_size - stripDebug(overlaystart);
    if (overlay >= (unsigned) file_size)
    {
#if 0
        if (overlay < file_size + ih.filealign)
            overlay = 0;
        else if (!opt->force)
            throwNotCompressible("overlay problem (try --force)");
#endif
        overlay = 0;
    }
    checkOverlay(overlay);

    Resource res;
    Interval tlsiv(ibuf);
    Export xport((char*)(unsigned char*)ibuf);

    // Preprocess each directory; ordering matters (see comment below).
    const unsigned dllstrings = processImports();
    processTls(&tlsiv); // call before processRelocs!!
    processResources(&res);
    processExports(&xport);
    processRelocs();

    //OutputFile::dump("x1", ibuf, usize);

    // some checks for broken linkers - disable filter if necessary
    bool allow_filter = true;
    if (ih.codebase == ih.database
        || ih.codebase + ih.codesize > ih.imagesize
        || (isection[virta2objnum(ih.codebase,isection,objs)].flags & PEFL_CODE) == 0)
        allow_filter = false;

    const unsigned oam1 = ih.objectalign - 1;

    // FIXME: disabled: the uncompressor would not allocate enough memory
    //objs = tryremove(IDADDR(PEDIR_RELOC),objs);

    // FIXME: if the last object has a bss then this won't work
    // newvsize = (isection[objs-1].vaddr + isection[objs-1].size + oam1) &~ oam1;
    // temporary solution:
    unsigned newvsize = (isection[objs-1].vaddr + isection[objs-1].vsize + oam1) &~ oam1;

    //fprintf(stderr,"newvsize=%x objs=%d\n",newvsize,objs);
    if (newvsize + soimport + sorelocs > ibuf.getSize())
        throwInternalError("buffer too small 2");
    // Append the preprocessed import and relocation data behind the image.
    memcpy(ibuf+newvsize,oimport,soimport);
    memcpy(ibuf+newvsize+soimport,orelocs,sorelocs);

    cimports = newvsize - rvamin;   // rva of preprocessed imports
    crelocs = cimports + soimport;  // rva of preprocessed fixups

    ph.u_len = newvsize + soimport + sorelocs;

    // some extra data for uncompression support
    unsigned s = 0;
    upx_byte * const p1 = ibuf + ph.u_len;
    memcpy(p1 + s,&ih,sizeof (ih));
    s += sizeof (ih);
    memcpy(p1 + s,isection,ih.objects * sizeof(*isection));
    s += ih.objects * sizeof(*isection);
    if (soimport)
    {
        set_le32(p1 + s,cimports);
        set_le32(p1 + s + 4,dllstrings);
        s += 8;
    }
    if (sorelocs)
    {
        set_le32(p1 + s,crelocs);
        p1[s + 4] = (unsigned char) (big_relocs & 6);
        s += 5;
    }
    if (soresources)
    {
        set_le16(p1 + s,icondir_count);
        s += 2;
    }
    // end of extra data
    set_le32(p1 + s,ptr_diff(p1,ibuf) - rvamin);
    s += 4;
    ph.u_len += s;
    obuf.allocForCompression(ph.u_len);

    // prepare packheader
    ph.u_len -= rvamin;
    // prepare filter
    Filter ft(ph.level);
    ft.buf_len = ih.codesize;
    ft.addvalue = ih.codebase - rvamin;
    // compress
    int filter_strategy = allow_filter ? 0 : -3;

    // disable filters for files with broken headers
    if (ih.codebase + ih.codesize > ph.u_len)
    {
        ft.buf_len = 1;
        filter_strategy = -3;
    }

    // limit stack size needed for runtime decompression
    upx_compress_config_t cconf; cconf.reset();
    cconf.conf_lzma.max_num_probs = 1846 + (768 << 4); // ushort: ~28 KiB stack
    compressWithFilters(&ft, 2048, &cconf, filter_strategy,
                        ih.codebase, rvamin, 0, NULL, 0);
    // info: see buildLoader()

    newvsize = (ph.u_len + rvamin + ph.overlap_overhead + oam1) &~ oam1;
    /*
    if (tlsindex && ((newvsize - ph.c_len - 1024 + oam1) &~ oam1) > tlsindex + 4)
        tlsindex = 0;
    */

    const unsigned lsize = getLoaderSize();
    int identsize = 0;
    const unsigned codesize = getLoaderSection("IDENTSTR",&identsize);
    assert(identsize > 0);
    getLoaderSection("UPX1HEAD",(int*)&ic);
    identsize += ic;

    pe_section_t osection[4];
    // section 0 : bss
    //         1 : [ident + header] + packed_data + unpacker + tls
    //         2 : not compressed data
    //         3 : resource data -- wince 5 needs a new section for this

    // identsplit - number of ident + (upx header) bytes to put into the PE header
    int identsplit = pe_offset + sizeof(osection) + sizeof(oh);
    if ((identsplit & 0x1ff) == 0)
        identsplit = 0;
    else if (((identsplit + identsize) ^ identsplit) < 0x200)
        identsplit = identsize;
    else
        identsplit = ALIGN_GAP(identsplit, 0x200);
    ic = identsize - identsplit;

    // Pad the compressed data so ident + data ends on a 16-byte boundary.
    const unsigned c_len = ((ph.c_len + ic) & 15) == 0 ? ph.c_len : ph.c_len + 16 - ((ph.c_len + ic) & 15);
    obuf.clear(ph.c_len, c_len - ph.c_len);

    const unsigned s1size = ALIGN_UP(ic + c_len + codesize,4u) + sotls;
    const unsigned s1addr = (newvsize - (ic + c_len) + oam1) &~ oam1;

    const unsigned ncsection = (s1addr + s1size + oam1) &~ oam1;
    const unsigned upxsection = s1addr + ic + c_len;

    Reloc rel(1024); // new relocations are put here

    // Loader symbols that need base relocations in the output image.
    static const char* symbols_to_relocate[] = {
        "ONAM", "BIMP", "BREL", "FIBE", "FIBS", "ENTR", "DST0", "SRC0"
    };
    for (unsigned s2r = 0; s2r < TABLESIZE(symbols_to_relocate); s2r++)
    {
        unsigned off = linker->getSymbolOffset(symbols_to_relocate[s2r]);
        if (off != 0xdeaddead)      // sentinel: symbol not present
            rel.add(off + upxsection, 3);
    }

    // new PE header
    memcpy(&oh,&ih,sizeof(oh));
    oh.filealign = 0x200; // identsplit depends on this

    memset(osection,0,sizeof(osection));

    oh.entry = upxsection;
    oh.objects = 4;
    oh.chksum = 0;

    // fill the data directory
    ODADDR(PEDIR_DEBUG) = 0;
    ODSIZE(PEDIR_DEBUG) = 0;
    ODADDR(PEDIR_IAT) = 0;
    ODSIZE(PEDIR_IAT) = 0;
    ODADDR(PEDIR_BOUNDIM) = 0;
    ODSIZE(PEDIR_BOUNDIM) = 0;

    // tls is put into section 1
    ic = s1addr + s1size - sotls;
    super::processTls(&rel,&tlsiv,ic);
    ODADDR(PEDIR_TLS) = sotls ? ic : 0;
    ODSIZE(PEDIR_TLS) = sotls ? 0x18 : 0;
    ic += sotls;

    // these are put into section 2
    ic = ncsection;

    // wince wants relocation data at the beginning of a section
    processRelocs(&rel);
    ODADDR(PEDIR_RELOC) = soxrelocs ? ic : 0;
    ODSIZE(PEDIR_RELOC) = soxrelocs;
    ic += soxrelocs;

    processImports(ic, linker->getSymbolOffset("IATT") + upxsection);
    ODADDR(PEDIR_IMPORT) = ic;
    ODSIZE(PEDIR_IMPORT) = soimpdlls;
    ic += soimpdlls;

    processExports(&xport,ic);
    ODADDR(PEDIR_EXPORT) = soexport ? ic : 0;
    ODSIZE(PEDIR_EXPORT) = soexport;
    if (!isdll && opt->win32_pe.compress_exports)
    {
        // Keep the (compressed) original export directory references.
        ODADDR(PEDIR_EXPORT) = IDADDR(PEDIR_EXPORT);
        ODSIZE(PEDIR_EXPORT) = IDSIZE(PEDIR_EXPORT);
    }
    ic += soexport;

    // Resources go into their own (object-aligned) section 3.
    ic = (ic + oam1) &~ oam1;
    const unsigned res_start = ic;
    if (soresources)
        processResources(&res,ic);
    ODADDR(PEDIR_RESOURCE) = soresources ? ic : 0;
    ODSIZE(PEDIR_RESOURCE) = soresources;
    ic += soresources;

    // Resolve the loader's runtime addresses.
    const unsigned onam = ncsection + soxrelocs + ih.imagebase;
    linker->defineSymbol("start_of_dll_names", onam);
    linker->defineSymbol("start_of_imports", ih.imagebase + rvamin + cimports);
    linker->defineSymbol("start_of_relocs", crelocs + rvamin + ih.imagebase);
    linker->defineSymbol("filter_buffer_end", ih.imagebase + ih.codebase + ih.codesize);
    linker->defineSymbol("filter_buffer_start", ih.imagebase + ih.codebase);
    linker->defineSymbol("original_entry", ih.entry + ih.imagebase);
    linker->defineSymbol("uncompressed_length", ph.u_len);
    linker->defineSymbol("start_of_uncompressed", ih.imagebase + rvamin);
    linker->defineSymbol("compressed_length", ph.c_len);
    linker->defineSymbol("start_of_compressed", ih.imagebase + s1addr + identsize - identsplit);
    defineDecompressorSymbols();
    relocateLoader();

    MemBuffer loader(lsize);
    memcpy(loader, getLoader(), lsize);
    patchPackHeader(loader, lsize);

    // this is computed here, because soxrelocs changes some lines above
    const unsigned ncsize = soxrelocs + soimpdlls + soexport;
    const unsigned fam1 = oh.filealign - 1;

    // fill the sections
    strcpy(osection[0].name,"UPX0");
    strcpy(osection[1].name,"UPX1");
    strcpy(osection[2].name, "UPX2");
    strcpy(osection[3].name, ".rsrc");

    osection[0].vaddr = rvamin;
    osection[1].vaddr = s1addr;
    osection[2].vaddr = ncsection;
    osection[3].vaddr = res_start;

    osection[0].size = 0;
    osection[1].size = (s1size + fam1) &~ fam1;
    osection[2].size = (ncsize + fam1) &~ fam1;
    osection[3].size = (soresources + fam1) &~ fam1;

    osection[0].vsize = osection[1].vaddr - osection[0].vaddr;
    //osection[1].vsize = (osection[1].size + oam1) &~ oam1;
    //osection[2].vsize = (osection[2].size + oam1) &~ oam1;
    osection[1].vsize = osection[1].size;
    osection[2].vsize = osection[2].size;
    osection[3].vsize = osection[3].size;

    osection[0].rawdataptr = 0;
    osection[1].rawdataptr = (pe_offset + sizeof(oh) + sizeof(osection) + fam1) &~ fam1;
    osection[2].rawdataptr = osection[1].rawdataptr + osection[1].size;
    osection[3].rawdataptr = osection[2].rawdataptr + osection[2].size;

    osection[0].flags = (unsigned) (PEFL_BSS|PEFL_EXEC|PEFL_WRITE|PEFL_READ);
    osection[1].flags = (unsigned) (PEFL_DATA|PEFL_EXEC|PEFL_WRITE|PEFL_READ);
    osection[2].flags = (unsigned) (PEFL_DATA|PEFL_READ);
    osection[3].flags = (unsigned) (PEFL_DATA|PEFL_READ);

    oh.imagesize = (osection[3].vaddr + osection[3].vsize + oam1) &~ oam1;
    oh.bsssize = osection[0].vsize;
    oh.datasize = osection[2].vsize + osection[3].vsize;
    oh.database = osection[2].vaddr;
    oh.codesize = osection[1].vsize;
    oh.codebase = osection[1].vaddr;
    oh.headersize = osection[1].rawdataptr;
    if (rvamin < osection[0].rawdataptr)
        throwCantPack("object alignment too small");

    if (opt->win32_pe.strip_relocs && !isdll)
        oh.flags |= RELOCS_STRIPPED;

    //for (ic = 0; ic < oh.filealign; ic += 4)
    //    set_le32(ibuf + ic,get_le32("UPX "));
    // ibuf's first filealign bytes are reused as zero padding below.
    ibuf.clear(0, oh.filealign);

    info("Image size change: %u -> %u KiB",
         ih.imagesize / 1024, oh.imagesize / 1024);

    infoHeader("[Writing compressed file]");

    if (soresources == 0)
    {
        oh.objects = 3;
        memset(&osection[3], 0, sizeof(osection[3]));
    }

    // write loader + compressed file
    fo->write(&oh,sizeof(oh));
    fo->write(osection,sizeof(osection));
    // some alignment
    if (identsplit == identsize)
    {
        unsigned n = osection[1].rawdataptr - fo->getBytesWritten() - identsize;
        assert(n <= oh.filealign);
        fo->write(ibuf, n);
    }
    fo->write(loader + codesize,identsize);
    infoWriting("loader", fo->getBytesWritten());
    fo->write(obuf,c_len);
    infoWriting("compressed data", c_len);
    fo->write(loader,codesize);
    if (opt->debug.dump_stub_loader)
        OutputFile::dump(opt->debug.dump_stub_loader, loader, codesize);
    if ((ic = fo->getBytesWritten() & 3) != 0)
        fo->write(ibuf,4 - ic);
    fo->write(otls,sotls);
    if ((ic = fo->getBytesWritten() & fam1) != 0)
        fo->write(ibuf,oh.filealign - ic);
    fo->write(oxrelocs,soxrelocs);
    fo->write(oimpdlls,soimpdlls);
    fo->write(oexport,soexport);
    if ((ic = fo->getBytesWritten() & fam1) != 0)
        fo->write(ibuf,oh.filealign - ic);
    fo->write(oresources,soresources);
    if ((ic = fo->getBytesWritten() & fam1) != 0)
        fo->write(ibuf,oh.filealign - ic);

#if 0
    printf("%-13s: program hdr : %8ld bytes\n", getName(), (long) sizeof(oh));
    printf("%-13s: sections : %8ld bytes\n", getName(), (long) sizeof(osection));
    printf("%-13s: ident : %8ld bytes\n", getName(), (long) identsize);
    printf("%-13s: compressed : %8ld bytes\n", getName(), (long) c_len);
    printf("%-13s: decompressor : %8ld bytes\n", getName(), (long) codesize);
    printf("%-13s: tls : %8ld bytes\n", getName(), (long) sotls);
    printf("%-13s: resources : %8ld bytes\n", getName(), (long) soresources);
    printf("%-13s: imports : %8ld bytes\n", getName(), (long) soimpdlls);
    printf("%-13s: exports : %8ld bytes\n", getName(), (long) soexport);
    printf("%-13s: relocs : %8ld bytes\n", getName(), (long) soxrelocs);
#endif

    // verify
    verifyOverlappingDecompression();

    // copy the overlay
    copyOverlay(fo, overlay, &obuf);

    // finally check the compression ratio
    if (!checkFinalCompressionRatio(fo))
        throwNotCompressible();
}
// Pass 1 of import processing for the ARM/PE (WinCE) packer.
//
// Scans the input file's import directory and builds two structures:
//  - oimpdlls: a minimal import table for the packed file, importing only
//    COREDLL.dll's LoadLibraryW / GetProcAddressA / CacheSync plus one
//    single-symbol descriptor per other distinct DLL (to force its load);
//  - oimport: a compact "preprocessed" byte stream of all original imports
//    (tag 1 = by name, 0xff = by ordinal) that the runtime stub decodes.
// Returns the start of the single removable names interval, or 0 when the
// name areas are scattered and cannot be removed.
unsigned PackArmPe::processImports() // pass 1
{
    static const unsigned char kernel32dll[] = "COREDLL.dll";
    // Concatenated, NUL-padded hint/name entries for the three imports kept
    // in the stub's own import table (offsets 0, 14, 14+18 — see below).
    static const char llgpa[] = "\x0\x0""LoadLibraryW\x0\x0"
                                "GetProcAddressA\x0\x0\x0"
                                "CacheSync";
    //static const char exitp[] = "ExitProcess\x0\x0\x0";

    // Count the import descriptors (the table is terminated by dllname == 0).
    unsigned dllnum = 0;
    import_desc *im = (import_desc*) (ibuf + IDADDR(PEDIR_IMPORT));
    import_desc * const im_save = im;
    if (IDADDR(PEDIR_IMPORT))
    {
        while (im->dllname)
            dllnum++, im++;
        im = im_save;
    }

    // Per-DLL bookkeeping used while sorting and re-emitting the imports.
    struct udll
    {
        const upx_byte *name;         // DLL name inside ibuf
        const upx_byte *shname;       // shortest by-name import of this DLL
        unsigned ordinal;             // last ordinal import seen (0 = none)
        unsigned iat;                 // RVA of the import address table
        LE32 *lookupt;                // import lookup (or address) table
        unsigned npos;                // name position in the new table
        unsigned original_position;   // index in the original descriptor list
        bool isk32;                   // true for COREDLL.dll itself

        // Sort order: COREDLL first, DLLs with empty lookup tables last,
        // then case-insensitive by name; ties prefer ordinal imports, then
        // the DLL whose shortest imported name is shorter.
        static int __acc_cdecl_qsort compare(const void *p1, const void *p2)
        {
            const udll *u1 = * (const udll * const *) p1;
            const udll *u2 = * (const udll * const *) p2;
            if (u1->isk32) return -1;
            if (u2->isk32) return 1;
            if (!*u1->lookupt) return 1;
            if (!*u2->lookupt) return -1;
            int rc = strcasecmp(u1->name,u2->name);
            if (rc) return rc;
            if (u1->ordinal) return -1;
            if (u2->ordinal) return 1;
            if (!u1->shname) return 1;
            if (!u2->shname) return -1;
            return strlen(u1->shname) - strlen(u2->shname);
        }
    };

    // +1 for dllnum=0
    Array(struct udll, dlls, dllnum+1);
    Array(struct udll *, idlls, dllnum+1);

    soimport = 1024; // safety

    // First walk: collect per-DLL info and measure the preprocessed size.
    unsigned ic,k32o;
    for (ic = k32o = 0; dllnum && im->dllname; ic++, im++)
    {
        idlls[ic] = dlls + ic;
        dlls[ic].name = ibuf + im->dllname;
        dlls[ic].shname = NULL;
        dlls[ic].ordinal = 0;
        dlls[ic].iat = im->iat;
        // Prefer the original first thunk (oft) when present, else the IAT.
        dlls[ic].lookupt = (LE32*) (ibuf + (im->oft ? im->oft : im->iat));
        dlls[ic].npos = 0;
        dlls[ic].original_position = ic;
        dlls[ic].isk32 = strcasecmp(kernel32dll,dlls[ic].name) == 0;

        soimport += strlen(dlls[ic].name) + 1 + 4;

        for (IPTR_I(LE32, tarr, dlls[ic].lookupt); *tarr; tarr += 1)
        {
            if (*tarr & 0x80000000)     // import by ordinal
            {
                importbyordinal = true;
                soimport += 2; // ordinal num: 2 bytes
                dlls[ic].ordinal = *tarr & 0xffff;
                //if (dlls[ic].isk32)
                //    kernel32ordinal = true,k32o++;
            }
            else                        // import by name (hint/name entry)
            {
                IPTR_I(const upx_byte, n, ibuf + *tarr + 2);
                unsigned len = strlen(n);
                soimport += len + 1;
                if (dlls[ic].shname == NULL || len < strlen (dlls[ic].shname))
                    dlls[ic].shname = n;
            }
            soimport++; // separator
        }
    }
    oimport = new upx_byte[soimport];
    memset(oimport,0,soimport);
    oimpdlls = new upx_byte[soimport];
    memset(oimpdlls,0,soimport);

    qsort(idlls,dllnum,sizeof (udll*),udll::compare);

    // Count distinct non-COREDLL DLLs and the space their names need.
    unsigned dllnamelen = sizeof (kernel32dll);
    unsigned dllnum2 = 1;
    for (ic = 0; ic < dllnum; ic++)
        if (!idlls[ic]->isk32 && (ic == 0 || strcasecmp(idlls[ic - 1]->name,idlls[ic]->name)))
        {
            dllnum2++;
            dllnamelen += strlen(idlls[ic]->name) + 1;
        }
    //fprintf(stderr,"dllnum=%d dllnum2=%d soimport=%d\n",dllnum,dllnum2,soimport); //

    info("Processing imports: %d DLLs", dllnum);

    // create the new import table
    im = (import_desc*) oimpdlls;

    LE32 *ordinals = (LE32*) (oimpdlls + (dllnum2 + 1) * sizeof(import_desc));
    LE32 *lookuptable = ordinals + 4;// + k32o + (isdll ? 0 : 1);
    upx_byte *dllnames = ((upx_byte*) lookuptable) + (dllnum2 - 1) * 8;
    upx_byte *importednames = dllnames + (dllnamelen &~ 1);

    unsigned k32namepos = ptr_diff(dllnames,oimpdlls);

    // COREDLL.dll descriptor: the three entries the stub itself needs.
    memcpy(importednames, llgpa, ALIGN_UP((unsigned) sizeof(llgpa), 2u));
    strcpy(dllnames,kernel32dll);
    im->dllname = k32namepos;
    im->iat = ptr_diff(ordinals,oimpdlls);
    *ordinals++ = ptr_diff(importednames,oimpdlls);           // LoadLibraryW
    *ordinals++ = ptr_diff(importednames,oimpdlls) + 14;      // GetProcAddressA
    *ordinals++ = ptr_diff(importednames,oimpdlls) + 14 + 18; // CacheSync
    dllnames += sizeof(kernel32dll);
    importednames += sizeof(llgpa);

    im++;
    // One descriptor per distinct other DLL, each importing a single symbol
    // (its ordinal if available, else its shortest name) to force the load.
    // Duplicate descriptors for the same DLL just reuse the stored name.
    for (ic = 0; ic < dllnum; ic++)
        if (idlls[ic]->isk32)
        {
            idlls[ic]->npos = k32namepos;
            /*
            if (idlls[ic]->ordinal)
                for (LE32 *tarr = idlls[ic]->lookupt; *tarr; tarr++)
                    if (*tarr & 0x80000000)
                        *ordinals++ = *tarr;
            */
        }
        else if (ic && strcasecmp(idlls[ic-1]->name,idlls[ic]->name) == 0)
            idlls[ic]->npos = idlls[ic-1]->npos;
        else
        {
            im->dllname = idlls[ic]->npos = ptr_diff(dllnames,oimpdlls);
            im->iat = ptr_diff(lookuptable,oimpdlls);

            strcpy(dllnames,idlls[ic]->name);
            dllnames += strlen(idlls[ic]->name)+1;
            if (idlls[ic]->ordinal)
                *lookuptable = idlls[ic]->ordinal + 0x80000000;
            else if (idlls[ic]->shname)
            {
                // keep hint/name entries even-aligned
                if (ptr_diff(importednames,oimpdlls) & 1)
                    importednames--;
                *lookuptable = ptr_diff(importednames,oimpdlls);
                importednames += 2;
                strcpy(importednames,idlls[ic]->shname);
                importednames += strlen(idlls[ic]->shname) + 1;
            }
            lookuptable += 2;
            im++;
        }
    soimpdlls = ALIGN_UP(ptr_diff(importednames,oimpdlls),4);

    Interval names(ibuf),iats(ibuf),lookups(ibuf);
    // create the preprocessed data
    //ordinals -= k32o;
    upx_byte *ppi = oimport;  // preprocessed imports
    for (ic = 0; ic < dllnum; ic++)
    {
        LE32 *tarr = idlls[ic]->lookupt;
#if 0 && ENABLE_THIS_AND_UNCOMPRESSION_WILL_BREAK
        if (!*tarr)  // no imports from this dll
            continue;
#endif
        // per-DLL header: name position in oimpdlls, IAT delta from rvamin
        set_le32(ppi,idlls[ic]->npos);
        set_le32(ppi+4,idlls[ic]->iat - rvamin);
        ppi += 8;
        for (; *tarr; tarr++)
            if (*tarr & 0x80000000)
            {
                /*if (idlls[ic]->isk32)
                {
                    *ppi++ = 0xfe; // signed + odd parity
                    set_le32(ppi,ptr_diff(ordinals,oimpdlls));
                    ordinals++;
                    ppi += 4;
                }
                else*/
                {
                    *ppi++ = 0xff;          // tag: import by ordinal
                    set_le16(ppi,*tarr & 0xffff);
                    ppi += 2;
                }
            }
            else
            {
                *ppi++ = 1;                 // tag: import by name
                unsigned len = strlen(ibuf + *tarr + 2) + 1;
                memcpy(ppi,ibuf + *tarr + 2,len);
                ppi += len;
                names.add(*tarr,len + 2 + 1);
            }
        ppi++;

        // Record which input areas may later be zeroed / removed.
        unsigned esize = ptr_diff((char *)tarr, (char *)idlls[ic]->lookupt);
        lookups.add(idlls[ic]->lookupt,esize);
        if (ptr_diff(ibuf + idlls[ic]->iat, (char *)idlls[ic]->lookupt))
        {
            memcpy(ibuf + idlls[ic]->iat, idlls[ic]->lookupt, esize);
            iats.add(idlls[ic]->iat,esize);
        }
        names.add(idlls[ic]->name,strlen(idlls[ic]->name) + 1 + 1);
    }
    ppi += 4;
    assert(ppi < oimport+soimport);
    soimport = ptr_diff(ppi,oimport);

    if (soimport == 4)
        soimport = 0;   // no imports at all

    //OutputFile::dump("x0.imp", oimport, soimport);
    //OutputFile::dump("x1.imp", oimpdlls, soimpdlls);

    unsigned ilen = 0;
    names.flatten();
    if (names.ivnum > 1)
    {
        // The area occupied by the dll and imported names is not continuous
        // so to still support uncompression, I can't zero the iat area.
        // This decreases compression ratio, so FIXME somehow.
        infoWarning("can't remove unneeded imports");
        ilen += sizeof(import_desc) * dllnum;
#if defined(DEBUG)
        if (opt->verbose > 3)
            names.dump();
#endif
        // do some work for the unpacker
        im = im_save;
        for (ic = 0; ic < dllnum; ic++, im++)
        {
            memset(im,FILLVAL,sizeof(*im));
            im->dllname = ptr_diff(dlls[idlls[ic]->original_position].name,ibuf);
        }
    }
    else
    {
        iats.add(im_save,sizeof(import_desc) * dllnum);
        // zero unneeded data
        iats.clear();
        lookups.clear();
    }
    names.clear();

    // Sum the size of everything the original import machinery occupied,
    // for the statistics line below.
    iats.add(&names);
    iats.add(&lookups);
    iats.flatten();
    for (ic = 0; ic < iats.ivnum; ic++)
        ilen += iats.ivarr[ic].len;
    info("Imports: original size: %u bytes, preprocessed size: %u bytes",ilen,soimport);

    return names.ivnum == 1 ? names.ivarr[0].start : 0;
}
// Rebuild the original import table inside the unpacked output image.
//
// Reads back the preprocessed import stream emitted by processImports()
// (per entry: tag 1 = import by name, 0xff = import by ordinal, anything
// else = a 32-bit reference into the original lookup table) and
// reconstructs the import descriptors, DLL names and IAT entries.
//
// @param extrainfo  in/out: points at two LE32 fields — the offset of the
//                   preprocessed import data, and the offset where DLL
//                   names are to be rebuilt (0 if the original name area
//                   was kept in place); advanced by 8 bytes past them.
void PackArmPe::rebuildImports(upx_byte *& extrainfo)
{
    if (ODADDR(PEDIR_IMPORT) == 0 || ODSIZE(PEDIR_IMPORT) <= sizeof(import_desc))
        return;

    // const upx_byte * const idata = obuf + get_le32(extrainfo);
    OPTR_C(const upx_byte, idata, obuf + get_le32(extrainfo));
    const unsigned inamespos = get_le32(extrainfo + 4);
    extrainfo += 8;

    // First pass: measure the space needed for all rebuilt DLL names.
    unsigned sdllnames = 0;

    // const upx_byte *import = ibuf + IDADDR(PEDIR_IMPORT) - isection[2].vaddr;
    // const upx_byte *p;
    IPTR_I(const upx_byte, import, ibuf + IDADDR(PEDIR_IMPORT) - isection[2].vaddr);
    OPTR(const upx_byte, p);

    for (p = idata; get_le32(p) != 0; ++p)
    {
        const upx_byte *dname = get_le32(p) + import;
        ICHECK(dname, 1);
        const unsigned dlen = strlen(dname);
        ICHECK(dname, dlen + 1);

        sdllnames += dlen + 1;
        // skip this DLL's import entries (see tag encoding above)
        for (p += 8; *p;)
            if (*p == 1)
                p += strlen(++p) + 1;
            else if (*p == 0xff)
                p += 3; // ordinal
            else
                p += 5;
    }
    sdllnames = ALIGN_UP(sdllnames,2u);

    upx_byte * const Obuf = obuf - rvamin;  // output buffer biased by rvamin
    import_desc * const im0 = (import_desc*) (Obuf + ODADDR(PEDIR_IMPORT));
    import_desc *im = im0;
    upx_byte *dllnames = Obuf + inamespos;
    upx_byte *importednames = dllnames + sdllnames;
    upx_byte * const importednames_start = importednames;

    // Second pass: rebuild descriptors, DLL names and IAT entries.
    for (p = idata; get_le32(p) != 0; ++p)
    {
        // restore the name of the dll
        const upx_byte *dname = get_le32(p) + import;
        ICHECK(dname, 1);
        const unsigned dlen = strlen(dname);
        ICHECK(dname, dlen + 1);

        const unsigned iatoffs = get_le32(p + 4) + rvamin;
        if (inamespos)
        {
            // now I rebuild the dll names
            OCHECK(dllnames, dlen + 1);
            strcpy(dllnames, dname);
            im->dllname = ptr_diff(dllnames,Obuf);
            //;;;printf("\ndll: %s:",dllnames);
            dllnames += dlen + 1;
        }
        else
        {
            // original name area was kept: copy the name back in place
            OCHECK(Obuf + im->dllname, dlen + 1);
            strcpy(Obuf + im->dllname, dname);
        }
        im->oft = im->iat = iatoffs;

        // LE32 *newiat = (LE32 *) (Obuf + iatoffs);
        OPTR_I(LE32, newiat, (LE32 *) (Obuf + iatoffs));

        // restore the imported names+ordinals
        for (p += 8; *p; ++newiat)
            if (*p == 1)
            {
                const unsigned ilen = strlen(++p) + 1;
                if (inamespos)
                {
                    // keep hint/name entries even-aligned
                    if (ptr_diff(importednames, importednames_start) & 1)
                        importednames -= 1;
                    omemcpy(importednames + 2, p, ilen);
                    //;;;printf(" %s",importednames+2);
                    *newiat = ptr_diff(importednames, Obuf);
                    importednames += 2 + ilen;
                }
                else
                {
                    OCHECK(Obuf + *newiat + 2, ilen + 1);
                    strcpy(Obuf + *newiat + 2, p);
                }
                p += ilen;
            }
            else if (*p == 0xff)
            {
                // ordinal import: set the high bit, low 16 bits = ordinal
                *newiat = get_le16(p + 1) + 0x80000000;
                //;;;printf(" %x",(unsigned)*newiat);
                p += 3;
            }
            else
            {
                // reference into the original lookup table
                *newiat = get_le32(get_le32(p + 1) + import);
                assert(*newiat & 0x80000000);
                p += 5;
            }
        *newiat = 0;    // terminate this DLL's IAT
        im++;
    }
    //memset(idata,0,p - idata);
}
static int parse_control_msg(const struct msghdr *msg) { const struct cmsghdr *cmsg_ptr; RUNTIME_ASSERT(msg); if (NULL == (cmsg_ptr = CMSG_FIRSTHDR(msg))) { /* ignoring datagram without control data */ return -1; } for (/* NOTHING */; cmsg_ptr; cmsg_ptr = cmsg_next_header(msg, cmsg_ptr)) { if (SOL_SOCKET == cmsg_ptr->cmsg_level) { switch (cmsg_ptr->cmsg_type) { #ifdef SCM_CREDS case SCM_CREDS: { const struct sockcred *cred; cred = cast_to_const_void_ptr(CMSG_DATA(cmsg_ptr)); debug_msg("SCM_CREDS" "(sc_uid=%lu, sc_euid=%lu, sc_gid=%lu, sc_egid=%lu)", (unsigned long) cred->sc_uid, (unsigned long) cred->sc_euid, (unsigned long) cred->sc_gid, (unsigned long) cred->sc_egid); } break; #endif /* SCM_CREDS */ case SCM_RIGHTS: { size_t len; len = cmsg_ptr->cmsg_len - ptr_diff(CMSG_DATA(cmsg_ptr), cmsg_ptr); if (len == sizeof (int)) { struct stat sb; const int *fd_ptr; int fd; fd_ptr = (const int *) CMSG_DATA(cmsg_ptr); fd = *fd_ptr; if (0 != fstat(fd, &sb)) { debug_error("fstat() failed"); close(fd); fd = -1; } else { if (S_IFSOCK != (sb.st_mode & S_IFMT)) { debug_msg("Not a socket (fd=%d, st_uid=%lu, st_gid=%lu", fd, (unsigned long) sb.st_uid, (unsigned long) sb.st_gid); } return fd; } } else { debug_msg("Bad length (%lu)", (unsigned long) len); } } break; default: debug_msg("unknown cmsg type (%u)", (unsigned) cmsg_ptr->cmsg_type); break; } } else { debug_msg("unknown cmsg level (%u)", (unsigned) cmsg_ptr->cmsg_level); } } return -1; }
/**
 * Add file to the current query hit.
 *
 * Builds the "H" child (with mandatory URN, plus optional URL/CSC/PART/CT,
 * DN/SZ/P and ALT children, depending on ctx->flags) and attaches it to
 * the query hit tree being constructed, updating ctx->current_size.
 *
 * @param ctx	the query hit builder (tree in ctx->t, dedup set in ctx->hs)
 * @param sf	the shared file to include in the hit
 *
 * @return TRUE if we kept the file, FALSE if we did not include it in the hit.
 */
static bool
g2_build_qh2_add(struct g2_qh2_builder *ctx, const shared_file_t *sf)
{
	const sha1_t *sha1;
	g2_tree_t *h, *c;

	shared_file_check(sf);

	/*
	 * Make sure the file is still in the library.
	 */

	if (0 == shared_file_index(sf))
		return FALSE;

	/*
	 * On G2, the H/URN child is required, meaning we need the SHA1 at least.
	 */

	if (!sha1_hash_available(sf))
		return FALSE;

	/*
	 * Do not send duplicates, as determined by the SHA1 of the resource.
	 *
	 * A user may share several files with different names but the same SHA1,
	 * and if all of them are hits, we only want to send one instance.
	 *
	 * When generating hits for host-browsing, we do not care about duplicates
	 * and ctx->hs is NULL then.
	 */

	sha1 = shared_file_sha1(sf);	/* This is an atom */

	if (ctx->hs != NULL) {
		if (hset_contains(ctx->hs, sha1))
			return FALSE;
		hset_insert(ctx->hs, sha1);
	}

	/*
	 * Create the "H" child and attach it to the current tree.
	 */

	if (NULL == ctx->t)
		g2_build_qh2_start(ctx);

	h = g2_tree_alloc_empty("H");
	g2_tree_add_child(ctx->t, h);

	/*
	 * URN -- Universal Resource Name
	 *
	 * If there is a known TTH, then we can generate a bitprint, otherwise
	 * we just convey the SHA1.
	 */

	{
		const tth_t * const tth = shared_file_tth(sf);
		char payload[SHA1_RAW_SIZE + TTH_RAW_SIZE + sizeof G2_URN_BITPRINT];
		char *p = payload;

		if (NULL == tth) {
			/* prefix + raw SHA1, clamped to the remaining buffer space */
			p = mempcpy(p, G2_URN_SHA1, sizeof G2_URN_SHA1);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
		} else {
			/* bitprint = prefix + raw SHA1 + raw TTH */
			p = mempcpy(p, G2_URN_BITPRINT, sizeof G2_URN_BITPRINT);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				sha1, SHA1_RAW_SIZE);
			p += clamp_memcpy(p, sizeof payload - ptr_diff(p, payload),
				tth, TTH_RAW_SIZE);
		}

		g_assert(ptr_diff(p, payload) <= sizeof payload);

		c = g2_tree_alloc_copy("URN", payload, ptr_diff(p, payload));
		g2_tree_add_child(h, c);
	}

	/*
	 * URL -- empty to indicate that we share the file via uri-res.
	 */

	if (ctx->flags & QHIT_F_G2_URL) {
		uint known;
		uint16 csc;

		c = g2_tree_alloc_empty("URL");
		g2_tree_add_child(h, c);

		/*
		 * CSC -- if we know alternate sources, indicate how many in "CSC".
		 *
		 * This child is only emitted when they requested "URL".
		 */

		known = dmesh_count(sha1);
		csc = MIN(known, MAX_INT_VAL(uint16));

		if (csc != 0) {
			char payload[2];

			poke_le16(payload, csc);
			c = g2_tree_alloc_copy("CSC", payload, sizeof payload);
			g2_tree_add_child(h, c);
		}

		/*
		 * PART -- if we only have a partial file, indicate how much we have.
		 *
		 * This child is only emitted when they requested "URL".
		 */

		if (shared_file_is_partial(sf) && !shared_file_is_finished(sf)) {
			filesize_t available = shared_file_available(sf);
			char payload[8];	/* If we have to encode file size as 64-bit */
			uint32 av32;
			time_t mtime = shared_file_modification_time(sf);

			c = g2_tree_alloc_empty("PART");
			g2_tree_add_child(h, c);

			av32 = available;
			if (av32 == available) {
				/* Fits within a 32-bit quantity */
				poke_le32(payload, av32);
				g2_tree_set_payload(c, payload, sizeof av32, TRUE);
			} else {
				/* Encode as a 64-bit quantity then */
				poke_le64(payload, available);
				g2_tree_set_payload(c, payload, sizeof payload, TRUE);
			}

			/*
			 * GTKG extension: encode the last modification time of the
			 * partial file in an "MT" child.  This lets the other party
			 * determine whether the host is still able to actively complete
			 * the file.
			 */

			poke_le32(payload, (uint32) mtime);
			g2_tree_add_child(c,
				g2_tree_alloc_copy("MT", payload, sizeof(uint32)));
		}

		/*
		 * CT -- creation time of the resource (GTKG extension).
		 */

		{
			time_t create_time = shared_file_creation_time(sf);

			if ((time_t) -1 != create_time) {
				char payload[8];
				int n;

				create_time = MAX(0, create_time);
				n = vlint_encode(create_time, payload);

				g2_tree_add_child(h,
					g2_tree_alloc_copy("CT", payload, n));	/* No trailing 0s */
			}
		}
	}

	/*
	 * DN -- distinguished name.
	 *
	 * Note that the presence of DN also governs the presence of SZ if the
	 * file length does not fit a 32-bit unsigned quantity.
	 */

	if (ctx->flags & QHIT_F_G2_DN) {
		char payload[8];		/* If we have to encode file size as 64-bit */
		uint32 fs32;
		filesize_t fs = shared_file_size(sf);
		const char *name;
		const char *rp;

		c = g2_tree_alloc_empty("DN");

		fs32 = fs;
		if (fs32 == fs) {
			/* Fits within a 32-bit quantity */
			poke_le32(payload, fs32);
			g2_tree_set_payload(c, payload, sizeof fs32, TRUE);
		} else {
			/* Does not fit a 32-bit quantity, emit a SZ child */
			poke_le64(payload, fs);
			g2_tree_add_child(h, g2_tree_alloc_copy("SZ", payload,
				sizeof payload));
		}

		name = shared_file_name_nfc(sf);
		g2_tree_append_payload(c, name, shared_file_name_nfc_len(sf));
		g2_tree_add_child(h, c);

		/*
		 * GTKG extension: if there is a file path, expose it as a "P" child
		 * under the DN node.
		 */

		rp = shared_file_relative_path(sf);
		if (rp != NULL) {
			g2_tree_add_child(c, g2_tree_alloc_copy("P", rp, strlen(rp)));
		}
	}

	/*
	 * GTKG extension: if they requested alt-locs in the /Q2/I with "A", then
	 * send them some known alt-locs in an "ALT" child.
	 *
	 * Note that these alt-locs can be for Gnutella hosts: since both Gnutella
	 * and G2 share a common HTTP-based file transfer mechanism with compatible
	 * extra headers, there is no need to handle them separately.
	 */

	if (ctx->flags & QHIT_F_G2_ALT) {
		gnet_host_t hvec[G2_BUILD_QH2_MAX_ALT];
		int hcnt = 0;

		hcnt = dmesh_fill_alternate(sha1, hvec, N_ITEMS(hvec));

		if (hcnt > 0) {
			int i;

			c = g2_tree_alloc_empty("ALT");

			for (i = 0; i < hcnt; i++) {
				host_addr_t addr;
				uint16 port;

				addr = gnet_host_get_addr(&hvec[i]);
				port = gnet_host_get_port(&hvec[i]);

				/* only IPv4 addresses fit the 6-byte address+port format */
				if (host_addr_is_ipv4(addr)) {
					char payload[6];

					host_ip_port_poke(payload, addr, port, NULL);
					g2_tree_append_payload(c, payload, sizeof payload);
				}
			}

			/*
			 * If the payload is still empty, then drop the "ALT" child.
			 * Otherwise, attach it to the "H" node.
			 */

			if (NULL == g2_tree_node_payload(c, NULL)) {
				g2_tree_free_null(&c);
			} else {
				g2_tree_add_child(h, c);
			}
		}
	}

	/*
	 * Update the size of the query hit we're generating.
	 */

	ctx->current_size += g2_frame_serialize(h, NULL, 0);

	return TRUE;
}
// Pack a TMT DOS32 ("Adam") executable.
//
// The image and its (mandatory) relocations are read, the relocations are
// compacted with optimizeReloc32() and appended to the image together with
// the original entry point, then everything is compressed.  Output layout:
// patched header, loader head (entry + unpacker code), compressed data,
// loader tail, and a single 4-byte relocation record.
void PackTmt::pack(OutputFile *fo)
{
    big_relocs = 0;

    Packer::handleStub(fi,fo,adam_offset);

    const unsigned usize = ih.imagesize;
    const unsigned rsize = ih.relocsize;

    ibuf.alloc(usize+rsize+128);
    obuf.allocForCompression(usize+rsize+128);

    MemBuffer wrkmem;
    wrkmem.alloc(rsize+EXTRA_INFO); // relocations

    fi->seek(adam_offset+sizeof(ih),SEEK_SET);
    fi->readx(ibuf,usize);
    fi->readx(wrkmem+4,rsize);
    // anything after image + relocations is overlay data
    const unsigned overlay = file_size - fi->tell();

    if (find_le32(ibuf,128,get_le32("UPX ")) >= 0)
        throwAlreadyPacked();
    if (rsize == 0)
        throwCantPack("file is already compressed with another packer");

    checkOverlay(overlay);

    unsigned relocsize = 0;
    //if (rsize)
    {
        // bias each relocation address by -4, then compact the table in place
        for (unsigned ic=4; ic<=rsize; ic+=4)
            set_le32(wrkmem+ic,get_le32(wrkmem+ic)-4);
        relocsize = ptr_diff(optimizeReloc32(wrkmem+4,rsize/4,wrkmem,ibuf,1,&big_relocs), wrkmem);
    }

    // append trailer: terminator, original entry point, total trailer size
    wrkmem[relocsize++] = 0;
    set_le32(wrkmem+relocsize,ih.entry); // save original entry point
    relocsize += 4;
    set_le32(wrkmem+relocsize,relocsize+4); // trailer size including this field
    relocsize += 4;
    memcpy(ibuf+usize,wrkmem,relocsize);

    // prepare packheader
    ph.u_len = usize + relocsize;
    // prepare filter
    Filter ft(ph.level);
    ft.buf_len = usize;
    // compress
    upx_compress_config_t cconf; cconf.reset();
    // limit stack size needed for runtime decompression
    cconf.conf_lzma.max_num_probs = 1846 + (768 << 4); // ushort: ~28 KiB stack
    compressWithFilters(&ft, 512, &cconf);

    const unsigned lsize = getLoaderSize();
    const unsigned s_point = getLoaderSection("TMTMAIN1"); // new entry point
    int e_len = getLoaderSectionStart("TMTCUTPO");
    const unsigned d_len = lsize - e_len;   // tail of the loader (decompressor)
    assert(e_len > 0 && s_point > 0);

    // patch loader
    linker->defineSymbol("original_entry", ih.entry);
    defineDecompressorSymbols();
    defineFilterSymbols(&ft);
    linker->defineSymbol("bytes_to_copy", ph.c_len + d_len);
    linker->defineSymbol("copy_dest", 0u - (ph.u_len + ph.overlap_overhead + d_len - 1));
    linker->defineSymbol("copy_source", ph.c_len + lsize - 1);
    //fprintf(stderr,"\nelen=%x dlen=%x copy_len=%x copy_to=%x oo=%x jmp_pos=%x ulen=%x c_len=%x \n\n",
    //        e_len,d_len,copy_len,copy_to,ph.overlap_overhead,jmp_pos,ph.u_len,ph.c_len);
    linker->defineSymbol("TMTCUTPO", ph.u_len + ph.overlap_overhead);
    relocateLoader();

    MemBuffer loader(lsize);
    memcpy(loader,getLoader(),lsize);
    patchPackHeader(loader,e_len);

    memcpy(&oh,&ih,sizeof(oh));
    oh.imagesize = ph.c_len + lsize; // new size
    oh.entry = s_point; // new entry point
    oh.relocsize = 4;   // exactly one relocation record remains (see below)

    // write loader + compressed file
    fo->write(&oh,sizeof(oh));
    fo->write(loader,e_len);
    fo->write(obuf,ph.c_len);
    fo->write(loader+lsize-d_len,d_len); // decompressor
    // The single remaining relocation: offset s_point+5.
    // NOTE(review): presumably the address field of an instruction in the
    // TMTMAIN1 stub — confirm against the stub source.
    char rel_entry[4];
    set_le32(rel_entry,5 + s_point);
    fo->write(rel_entry,sizeof (rel_entry));

    // verify
    verifyOverlappingDecompression();

    // copy the overlay
    copyOverlay(fo, overlay, &obuf);

    // finally check the compression ratio
    if (!checkFinalCompressionRatio(fo))
        throwNotCompressible();
}
/**
 * Store big value in the .dat file, writing to the supplied block numbers.
 *
 * Runs of consecutive block numbers are coalesced so that each run is
 * written with a single pwrite() call.  The block vector must hold the
 * block numbers in strictly increasing order; a violation is reported as
 * a corrupted .pag page.
 *
 * @param db the sdbm database
 * @param bvec start of block vector, containing block numbers (big-endian)
 * @param data start of data to write
 * @param len length of data to write
 *
 * @return -1 on error with errno set, 0 if OK.
 */
static int
big_store(DBM *db, const void *bvec, const void *data, size_t len)
{
	DBMBIG *dbg = db->big;
	int bcnt = bigbcnt(len);
	int n;
	const void *p;
	const char *q;
	size_t remain;

	/* must not run while a bitcheck is pending -- presumably the bitmap
	 * consistency pass; verify against big_check_start() */
	g_return_val_if_fail(NULL == dbg->bitcheck, -1);

	/* lazily open the .dat file on first use */
	if (-1 == dbg->fd && -1 == big_open(dbg))
		return -1;

	/*
	 * Look at the amount of consecutive block numbers we have to be able
	 * to write into them via a single system call.
	 */

	n = bcnt;
	p = bvec;
	q = data;
	remain = len;

	while (n > 0) {
		size_t towrite = MIN(remain, BIG_BLKSIZE);
		guint32 bno = peek_be32(p);
		guint32 prev_bno = bno;

		p = const_ptr_add_offset(p, sizeof(guint32));
		n--;
		remain = size_saturate_sub(remain, towrite);

		/* extend the run while the next block number is prev + 1 */
		while (n > 0) {
			guint32 next_bno = peek_be32(p);
			size_t amount;

			if (next_bno <= prev_bno)	/* Block numbers are sorted */
				goto corrupted_page;

			if (next_bno - prev_bno != 1)
				break;			/* Not consecutive */

			prev_bno = next_bno;
			p = const_ptr_add_offset(p, sizeof(guint32));
			amount = MIN(remain, BIG_BLKSIZE);
			towrite += amount;
			n--;
			remain = size_saturate_sub(remain, amount);
		}

		dbg->bigwrite++;	/* statistics: one physical write per run */
		if (-1 == compat_pwrite(dbg->fd, q, towrite, OFF_DAT(bno))) {
			g_warning("sdbm: \"%s\": "
				"could not write %lu bytes starting at data block #%u: %s",
				sdbm_name(db), (unsigned long) towrite, bno,
				g_strerror(errno));

			ioerr(db, TRUE);
			return -1;
		}

		q += towrite;
		dbg->bigwrite_blk += bigblocks(towrite);
		g_assert(ptr_diff(q, data) <= len);
	}

	g_assert(ptr_diff(q, data) == len);

	return 0;

corrupted_page:
	g_warning("sdbm: \"%s\": corrupted page: %d big data block%s not sorted",
		sdbm_name(db), bcnt, 1 == bcnt ? "" : "s");

	ioerr(db, FALSE);
	errno = EFAULT;		/* Data corrupted somehow (.pag file) */
	return -1;
}
void find ( const char *arg ) { string_ref sr1; string_ref sr2; const char *p; // Look for each character in the string(searching from the start) p = arg; sr1 = arg; while ( *p ) { string_ref::size_type pos = sr1.find(*p); BOOST_CHECK ( pos != string_ref::npos && ( pos <= ptr_diff ( p, arg ))); ++p; } // Look for each character in the string (searching from the end) p = arg; sr1 = arg; while ( *p ) { string_ref::size_type pos = sr1.rfind(*p); BOOST_CHECK ( pos != string_ref::npos && pos < sr1.size () && ( pos >= ptr_diff ( p, arg ))); ++p; } // Look for pairs on characters (searching from the start) sr1 = arg; p = arg; while ( *p && *(p+1)) { string_ref sr3 ( p, 2 ); string_ref::size_type pos = sr1.find ( sr3 ); BOOST_CHECK ( pos != string_ref::npos && pos <= static_cast<string_ref::size_type>( p - arg )); p++; } sr1 = arg; p = arg; // for all possible chars, see if we find them in the right place. // Note that strchr will/might do the _wrong_ thing if we search for NULL for ( int ch = 1; ch < 256; ++ch ) { string_ref::size_type pos = sr1.find(ch); const char *strp = std::strchr ( arg, ch ); BOOST_CHECK (( strp == NULL ) == ( pos == string_ref::npos )); if ( strp != NULL ) BOOST_CHECK ( ptr_diff ( strp, arg ) == pos ); } sr1 = arg; p = arg; // for all possible chars, see if we find them in the right place. 
// Note that strchr will/might do the _wrong_ thing if we search for NULL for ( int ch = 1; ch < 256; ++ch ) { string_ref::size_type pos = sr1.rfind(ch); const char *strp = std::strrchr ( arg, ch ); BOOST_CHECK (( strp == NULL ) == ( pos == string_ref::npos )); if ( strp != NULL ) BOOST_CHECK ( ptr_diff ( strp, arg ) == pos ); } // Find everything at the start p = arg; sr1 = arg; while ( !sr1.empty ()) { string_ref::size_type pos = sr1.find(*p); BOOST_CHECK ( pos == 0 ); sr1.remove_prefix (1); ++p; } // Find everything at the end sr1 = arg; p = arg + strlen ( arg ) - 1; while ( !sr1.empty ()) { string_ref::size_type pos = sr1.rfind(*p); BOOST_CHECK ( pos == sr1.size () - 1 ); sr1.remove_suffix (1); --p; } // Find everything at the start sr1 = arg; p = arg; while ( !sr1.empty ()) { string_ref::size_type pos = sr1.find_first_of(*p); BOOST_CHECK ( pos == 0 ); sr1.remove_prefix (1); ++p; } // Find everything at the end sr1 = arg; p = arg + strlen ( arg ) - 1; while ( !sr1.empty ()) { string_ref::size_type pos = sr1.find_last_of(*p); BOOST_CHECK ( pos == sr1.size () - 1 ); sr1.remove_suffix (1); --p; } // Basic sanity checking for "find_first_of / find_first_not_of" sr1 = arg; sr2 = arg; while ( !sr1.empty() ) { BOOST_CHECK ( sr1.find_first_of ( sr2 ) == 0 ); BOOST_CHECK ( sr1.find_first_not_of ( sr2 ) == string_ref::npos ); sr1.remove_prefix ( 1 ); } p = arg; sr1 = arg; while ( *p ) { string_ref::size_type pos1 = sr1.find_first_of(*p); string_ref::size_type pos2 = sr1.find_first_not_of(*p); BOOST_CHECK ( pos1 != string_ref::npos && pos1 < sr1.size () && pos1 <= ptr_diff ( p, arg )); if ( pos2 != string_ref::npos ) { for ( size_t i = 0 ; i < pos2; ++i ) BOOST_CHECK ( sr1[i] == *p ); BOOST_CHECK ( sr1 [ pos2 ] != *p ); } BOOST_CHECK ( pos2 != pos1 ); ++p; } // Basic sanity checking for "find_last_of / find_last_not_of" sr1 = arg; sr2 = arg; while ( !sr1.empty() ) { BOOST_CHECK ( sr1.find_last_of ( sr2 ) == ( sr1.size () - 1 )); BOOST_CHECK ( sr1.find_last_not_of ( sr2 ) 
== string_ref::npos ); sr1.remove_suffix ( 1 ); } p = arg; sr1 = arg; while ( *p ) { string_ref::size_type pos1 = sr1.find_last_of(*p); string_ref::size_type pos2 = sr1.find_last_not_of(*p); BOOST_CHECK ( pos1 != string_ref::npos && pos1 < sr1.size () && pos1 >= ptr_diff ( p, arg )); BOOST_CHECK ( pos2 == string_ref::npos || pos1 < sr1.size ()); if ( pos2 != string_ref::npos ) { for ( size_t i = sr1.size () -1 ; i > pos2; --i ) BOOST_CHECK ( sr1[i] == *p ); BOOST_CHECK ( sr1 [ pos2 ] != *p ); } BOOST_CHECK ( pos2 != pos1 ); ++p; } }
/**
 * Compress as much data as possible to the output buffer, sending data
 * as we go along.
 *
 * @param tx	the deflate TX driver
 * @param data	start of the data to compress
 * @param len	amount of bytes to compress
 *
 * @return the amount of input bytes that were consumed ("added"), -1 on error.
 */
static int
deflate_add(txdrv_t *tx, const void *data, int len)
{
	struct attr *attr = tx->opaque;
	z_streamp outz = attr->outz;
	int added = 0;

	if (tx_deflate_debugging(9)) {
		g_debug("TX %s: (%s) given %u bytes (buffer #%d, nagle %s, "
			"unflushed %zu) [%c%c]%s",
			G_STRFUNC, gnet_host_to_string(&tx->host), len, attr->fill_idx,
			(attr->flags & DF_NAGLE) ? "on" : "off", attr->unflushed,
			(attr->flags & DF_FLOWC) ? 'C' : '-',
			(attr->flags & DF_FLUSH) ? 'f' : '-',
			(tx->flags & TX_ERROR) ? " ERROR" : "");
	}

	/*
	 * If an error was already reported, the whole deflate stream is dead
	 * and we cannot accept any more data.
	 */

	if G_UNLIKELY(tx->flags & TX_ERROR)
		return -1;

	while (added < len) {
		struct buffer *b = &attr->buf[attr->fill_idx];	/* Buffer we fill */
		int ret;
		int old_added = added;
		bool flush_started = (attr->flags & DF_FLUSH) ? TRUE : FALSE;
		int old_avail;
		const char *in, *old_in;

		/*
		 * Prepare call to deflate().
		 */

		outz->next_out = cast_to_pointer(b->wptr);
		outz->avail_out = old_avail = b->end - b->wptr;

		in = data;
		old_in = &in[added];
		outz->next_in = deconstify_pointer(old_in);
		outz->avail_in = len - added;

		g_assert(outz->avail_out > 0);
		g_assert(outz->avail_in > 0);

		/*
		 * Compress data.
		 *
		 * If we previously started to flush, continue the operation, now
		 * that we have more room available for the output.
		 */

		ret = deflate(outz, flush_started ? Z_SYNC_FLUSH : 0);	/* 0 == Z_NO_FLUSH */

		if (Z_OK != ret) {
			attr->flags |= DF_SHUTDOWN;
			(*attr->cb->shutdown)(tx->owner, "Compression failed: %s",
				zlib_strerror(ret));
			return -1;
		}

		/*
		 * Update the parameters.
		 */

		b->wptr = cast_to_pointer(outz->next_out);
		added = ptr_diff(outz->next_in, in);

		g_assert(added >= old_added);

		attr->unflushed += added - old_added;
		attr->flushed += old_avail - outz->avail_out;

		if (NULL != attr->cb->add_tx_deflated)
			attr->cb->add_tx_deflated(tx->owner, old_avail - outz->avail_out);

		/* when gzip framing is on, track size and CRC of the raw input */
		if (attr->gzip.enabled) {
			size_t r;

			r = ptr_diff(outz->next_in, old_in);
			attr->gzip.size += r;
			attr->gzip.crc = crc32(attr->gzip.crc,
				cast_to_constpointer(old_in), r);
		}

		if (tx_deflate_debugging(9)) {
			g_debug("TX %s: (%s) deflated %d bytes into %d "
				"(buffer #%d, nagle %s, flushed %zu, unflushed %zu) [%c%c]",
				G_STRFUNC, gnet_host_to_string(&tx->host),
				added, old_avail - outz->avail_out, attr->fill_idx,
				(attr->flags & DF_NAGLE) ? "on" : "off",
				attr->flushed, attr->unflushed,
				(attr->flags & DF_FLOWC) ? 'C' : '-',
				(attr->flags & DF_FLUSH) ? 'f' : '-');
		}

		/*
		 * If we filled the output buffer, check whether we have a pending
		 * send buffer.  If we do, we cannot process more data.  Otherwise
		 * send it now and continue.
		 */

		if (0 == outz->avail_out) {
			if (attr->send_idx >= 0) {
				deflate_set_flowc(tx, TRUE);	/* Enter flow control */
				return added;
			}

			deflate_rotate_and_send(tx);		/* Can set TX_ERROR */

			if (tx->flags & TX_ERROR)
				return -1;
		}

		/*
		 * If we were flushing and we consumed all the input, then
		 * the flush is done and we're starting normal compression again.
		 *
		 * This must be done after we made sure that we had enough output
		 * space available.
		 */

		if (flush_started && 0 == outz->avail_in)
			deflate_flushed(tx);
	}

	g_assert(0 == outz->avail_in);

	/*
	 * Start Nagle if not already on.
	 */

	if (attr->flags & DF_NAGLE)
		deflate_nagle_delay(tx);
	else
		deflate_nagle_start(tx);

	/*
	 * We're going to ask for a flush if not already started yet and the
	 * amount of bytes we have written since the last flush is greater
	 * than attr->buffer_flush.
	 */

	if (attr->unflushed > attr->buffer_flush) {
		if (!deflate_flush(tx))
			return -1;
	}

	return added;
}