uint32_t HPACKDecoder::decode(Cursor& cursor,
                              uint32_t totalBytes,
                              headers_t& headers) {
  uint32_t emittedSize = 0;
  HPACKDecodeBuffer dbuf(getHuffmanTree(), cursor, totalBytes);
  while (!hasError() && !dbuf.empty()) {
    emittedSize += decodeHeader(dbuf, &headers);
    if (emittedSize > maxUncompressed_) {
      LOG(ERROR) << "exceeded uncompressed size limit of "
                 << maxUncompressed_ << " bytes";
      err_ = DecodeError::HEADERS_TOO_LARGE;
      return dbuf.consumedBytes();
    }
  }
  if (version_ != Version::HPACK05) {
    return dbuf.consumedBytes();
  }
  emittedSize += emitRefset(headers);
  // the emitted bytes from the refset are bounded by the size of the table,
  // but adding the check just for uniformity
  if (emittedSize > maxUncompressed_) {
    LOG(ERROR) << "exceeded uncompressed size limit of "
               << maxUncompressed_ << " bytes";
    err_ = DecodeError::HEADERS_TOO_LARGE;
  }
  return dbuf.consumedBytes();
}
void tuntap::set_read_ready_cb(const std::function<void()> &cb,
                               size_t default_bufsize) {
    read_ready_cb = cb;
    // do the first async read here to make data available right away
    // and simplify the code in recv_packet a little
    recv_buf = dbuf(default_bufsize);
    do_read();
}
void scan_outlook(const class scanner_params &sp, const recursion_control_block &rcb)
{
    assert(sp.sp_version == scanner_params::CURRENT_SP_VERSION);
    if (sp.phase == scanner_params::PHASE_STARTUP) {
        assert(sp.info->si_version == scanner_info::CURRENT_SI_VERSION);
        sp.info->name        = "outlook";
        sp.info->author      = "Simson L. Garfinkel";
        sp.info->description = "Outlook Compressible Encryption";
        sp.info->flags       = scanner_info::SCANNER_DISABLED
            | scanner_info::SCANNER_RECURSE
            | scanner_info::SCANNER_DEPTH_0
            | scanner_info::SCANNER_NO_ALL;
        return;
    }
    if (sp.phase == scanner_params::PHASE_SCAN) {
        const sbuf_t &sbuf = sp.sbuf;
        const pos0_t &pos0 = sp.sbuf.pos0;

        // dodge infinite recursion by refusing to operate on an OFE'd buffer
        if (rcb.partName == pos0.lastAddedPart()) {
            return;
        }

        // managed_malloc throws an exception if allocation fails.
        managed_malloc<uint8_t> dbuf(sbuf.bufsize);

        for (size_t ii = 0; ii < sbuf.bufsize; ii++) {
            uint8_t ch = sbuf.buf[ii];
            dbuf.buf[ii] = libpff_encryption_compressible[ch];
        }

        const pos0_t pos0_oce = pos0 + "OUTLOOK";
        const sbuf_t child_sbuf(pos0_oce, dbuf.buf, sbuf.bufsize, sbuf.pagesize, false);
        scanner_params child_params(sp, child_sbuf);
        (*rcb.callback)(child_params);          // recurse on deobfuscated buffer
    }
}
static void test3()
{
	// Pass a dbuf_pool memory-pool object to the dbuf_guard constructor;
	// when the dbuf_guard object is destroyed, the dbuf_pool object is
	// destroyed along with it.
	acl::dbuf_guard dbuf(new acl::dbuf_pool);

	test_dbuf(dbuf);
}
static void test4()
{
	// Create the dbuf_guard object, specifying a multiplier of 10 for the
	// memory blocks of the internal pool (i.e. each internal block is
	// 4096 * 10 = 40 KB), and also specifying the initial capacity of the
	// internal dynamic array.
	acl::dbuf_guard dbuf(10, 100);

	test_dbuf(dbuf);
}
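// Both test3() and test4() above delegate to test_dbuf(), which is not shown
// in this excerpt. The following is a minimal hypothetical sketch of such a
// helper, assuming acl::dbuf_guard forwards dbuf_alloc()/dbuf_strdup() to its
// underlying dbuf_pool and that "acl_cpp/lib_acl.hpp" is included; it is
// illustrative only, not the original helper.
#include <cstdio>

static void test_dbuf(acl::dbuf_guard& dbuf)
{
	for (int i = 0; i < 100; i++) {
		// raw allocation from the guarded pool
		char* buf = (char*) dbuf.dbuf_alloc(128);
		snprintf(buf, 128, "block %d", i);

		// pool-backed string duplication
		char* copy = dbuf.dbuf_strdup(buf);
		printf("%s\r\n", copy);
	}
	// no explicit free: the dbuf_guard destructor releases all pool memory
}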
void exclusive_scan(const vex::vector<T> &src, vex::vector<T> &dst) {
    auto queue = src.queue_list();

    std::vector<T> tail;
    /* If there is more than one partition, we need to take a copy of the
     * last element in each partition (except the last), as otherwise
     * information about it is lost.
     *
     * This must be captured here rather than later, in case the input and
     * output alias.
     */
    if (queue.size() > 1) {
        tail.resize(queue.size() - 1);
        for (unsigned d = 0; d < tail.size(); ++d) {
            if (src.part_size(d))
                tail[d] = src[src.part_start(d + 1) - 1];
        }
    }

    // Scan partitions separately.
    for(unsigned d = 0; d < queue.size(); ++d) {
        if (src.part_size(d)) {
            boost::compute::command_queue q( queue[d]() );

            boost::compute::buffer sbuf( src(d).raw() );
            boost::compute::buffer dbuf( dst(d).raw() );

            boost::compute::detail::scan(
                    boost::compute::make_buffer_iterator<T>(sbuf, 0),
                    boost::compute::make_buffer_iterator<T>(sbuf, src.part_size(d)),
                    boost::compute::make_buffer_iterator<T>(dbuf, 0),
                    true, q
                    );
        }
    }

    // If there is more than one partition,
    // update all of them except for the first.
    if (queue.size() > 1) {
        T sum{};
        for(unsigned d = 0; d < tail.size(); ++d) {
            if (src.part_size(d)) {
                sum += tail[d];
                sum += dst[src.part_start(d + 1) - 1];

                // Wrap partition into vector for ease of use:
                vex::vector<T> part(queue[d + 1], dst(d + 1));

                part += sum;
            }
        }
    }
}
uint32_t HPACKDecoder::decode(Cursor& cursor,
                              uint32_t totalBytes,
                              headers_t& headers) {
  uint32_t emittedSize = 0;
  HPACKDecodeBuffer dbuf(getHuffmanTree(), cursor, totalBytes,
                         maxUncompressed_);
  while (!hasError() && !dbuf.empty()) {
    emittedSize += decodeHeader(dbuf, &headers);
    if (emittedSize > maxUncompressed_) {
      LOG(ERROR) << "exceeded uncompressed size limit of "
                 << maxUncompressed_ << " bytes";
      err_ = DecodeError::HEADERS_TOO_LARGE;
      return dbuf.consumedBytes();
    }
  }
  return dbuf.consumedBytes();
}
void inclusive_scan(const vex::vector<T> &src, vex::vector<T> &dst) {
    auto queue = src.queue_list();

    // Scan partitions separately.
    for(unsigned d = 0; d < queue.size(); ++d) {
        if (src.part_size(d)) {
            boost::compute::command_queue q( queue[d]() );

            boost::compute::buffer sbuf( src(d)() );
            boost::compute::buffer dbuf( dst(d)() );

            boost::compute::detail::scan(
                    boost::compute::make_buffer_iterator<T>(sbuf, 0),
                    boost::compute::make_buffer_iterator<T>(sbuf, src.part_size(d)),
                    boost::compute::make_buffer_iterator<T>(dbuf, 0),
                    false, q
                    );
        }
    }

    // If there is more than one partition,
    // update all of them except for the first.
    if (queue.size() > 1) {
        std::vector<T> tail(queue.size() - 1, T());
        for(unsigned d = 0; d < tail.size(); ++d) {
            if (src.part_size(d))
                tail[d] = dst[src.part_start(d + 1) - 1];
        }

        std::partial_sum(tail.begin(), tail.end(), tail.begin());

        for(unsigned d = 1; d < queue.size(); ++d) {
            if (src.part_size(d)) {
                // Wrap partition into vector for ease of use:
                vex::vector<T> part(queue[d], dst(d));

                part += tail[d - 1];
            }
        }
    }
}
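// A minimal usage sketch for the two scans above, assuming a working OpenCL
// setup and that exclusive_scan()/inclusive_scan() are reachable from the
// calling scope (in VexCL they are declared in the vex namespace). Sketch
// only; device selection and error handling are kept to a bare minimum.
#include <vector>
#include <stdexcept>
#include <iostream>
#include <vexcl/vexcl.hpp>

int main() {
    vex::Context ctx( vex::Filter::Any );      // pick any available device(s)
    if (!ctx) throw std::runtime_error("no OpenCL devices found");

    const size_t n = 1 << 20;
    std::vector<int> host(n, 1);               // 1, 1, 1, ...

    vex::vector<int> src(ctx, host);
    vex::vector<int> dst(ctx, n);

    vex::inclusive_scan(src, dst);             // dst = 1, 2, 3, ...
    vex::exclusive_scan(src, dst);             // dst = 0, 1, 2, ...

    std::vector<int> result(n);
    vex::copy(dst, result);                    // copy back to the host
    std::cout << result.back() << std::endl;   // n - 1 for the exclusive scan
}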
uint32_t HPACKDecoder::decodeStreaming(
    Cursor& cursor,
    uint32_t totalBytes,
    HeaderCodec::StreamingCallback* streamingCb) {
  uint32_t emittedSize = 0;
  streamingCb_ = streamingCb;
  HPACKDecodeBuffer dbuf(getHuffmanTree(), cursor, totalBytes,
                         maxUncompressed_);
  while (!hasError() && !dbuf.empty()) {
    emittedSize += decodeHeader(dbuf, nullptr);
    if (emittedSize > maxUncompressed_) {
      LOG(ERROR) << "exceeded uncompressed size limit of "
                 << maxUncompressed_ << " bytes";
      err_ = HPACK::DecodeError::HEADERS_TOO_LARGE;
      return dbuf.consumedBytes();
    }
  }
  return dbuf.consumedBytes();
}
/**
 * given a location in an sbuf, determine if it contains a zip component.
 * If it does and if it passes validity tests, unzip and recurse.
 */
inline void scan_zip_component(const class scanner_params &sp, const recursion_control_block &rcb,
                               feature_recorder *zip_recorder, feature_recorder *unzip_recorder, size_t pos)
{
    const sbuf_t &sbuf = sp.sbuf;
    const pos0_t &pos0 = sp.sbuf.pos0;

    /* Local file header */
    uint16_t version_needed_to_extract = sbuf.get16u(pos+4);
    uint16_t general_purpose_bit_flag  = sbuf.get16u(pos+6);
    uint16_t compression_method = sbuf.get16u(pos+8);
    uint16_t lastmodtime        = sbuf.get16u(pos+10);
    uint16_t lastmoddate        = sbuf.get16u(pos+12);
    uint32_t crc32              = sbuf.get32u(pos+14); /* not used */
    uint32_t compr_size         = sbuf.get32u(pos+18);
    uint32_t uncompr_size       = sbuf.get32u(pos+22);
    uint16_t name_len           = sbuf.get16u(pos+26);
    uint16_t extra_field_len    = sbuf.get16u(pos+28);

    if((name_len<=0) || (name_len > zip_name_len_max)) return; // unreasonable name length
    if(pos+30+name_len > sbuf.bufsize) return;                 // name is bigger than what's left
    //if(compr_size<0 || uncompr_size<0) return;               // sanity check

    string name = sbuf.substr(pos+30,name_len);

    /* scan for unprintable characters.
     * Name may contain UTF-8
     */
    if(utf8::find_invalid(name.begin(),name.end()) != name.end()) return; // invalid utf8 in name; not a valid zip header
    if(has_control_characters(name)) return; // no control characters allowed.
    name = dfxml_writer::xmlescape(name);    // make sure it is escaped

    if(name.size()==0) name="<NONAME>";      // we want at least something

    /* Save details of the zip header */
    std::string mtime = fatDateToISODate(lastmoddate,lastmodtime);
    char b2[1024];
    snprintf(b2,sizeof(b2),
             "<zipinfo><name>%s</name>"
             "<version>%d</version><general>%d</general><compression_method>%d</compression_method>"
             "<uncompr_size>%d</uncompr_size><compr_size>%d</compr_size>"
             "<mtime>%s</mtime><crc32>%u</crc32>"
             "<extra_field_len>%d</extra_field_len>",
             name.c_str(),version_needed_to_extract,
             general_purpose_bit_flag,compression_method,uncompr_size,compr_size,
             mtime.c_str(),
             crc32,extra_field_len);
    stringstream xmlstream;
    xmlstream << b2;

    const unsigned char *data_buf = sbuf.buf+pos+30+name_len+extra_field_len; // where the data starts
    if(data_buf > sbuf.buf+sbuf.bufsize){ // past the end of buffer?
        xmlstream << "<disposition>end-of-buffer</disposition></zipinfo>";
        zip_recorder->write(pos0+pos,name,xmlstream.str());
        return;
    }

    /* OpenOffice makes invalid ZIP files with compr_size=0 and uncompr_size=0.
     * If compr_size==uncompr_size==0, then assume it may go to the end of the sbuf.
     */
    if(uncompr_size==0 && compr_size==0){
        uncompr_size = zip_max_uncompr_size;
        compr_size   = zip_max_uncompr_size;
    }

    /* See if we can decompress */
    if(version_needed_to_extract==20 && uncompr_size>=zip_min_uncompr_size){
        if(uncompr_size > zip_max_uncompr_size){
            uncompr_size = zip_max_uncompr_size; // don't uncompress bigger than 16MB
        }

        // don't decompress beyond the end of the buffer
        if((u_int)compr_size > sbuf.bufsize - (data_buf-sbuf.buf)){
            compr_size = sbuf.bufsize - (data_buf-sbuf.buf);
        }

        /* If depth is more than 0, don't decompress if we have seen this component before */
        if(sp.depth>0){
            if(sp.fs.check_previously_processed(data_buf,compr_size)){
                xmlstream << "<disposition>previously-processed</disposition></zipinfo>";
                zip_recorder->write(pos0+pos,name,xmlstream.str());
                return;
            }
        }

        managed_malloc<Bytef> dbuf(uncompr_size);
        if(!dbuf.buf){
            xmlstream << "<disposition>calloc-failed</disposition></zipinfo>";
            zip_recorder->write(pos0+pos,name,xmlstream.str());
            return;
        }

        z_stream zs;
        memset(&zs,0,sizeof(zs));
        zs.next_in   = (Bytef *)data_buf; // note that next_in should be typedef const but is not
        zs.avail_in  = compr_size;
        zs.next_out  = dbuf.buf;
        zs.avail_out = uncompr_size;

        int r = inflateInit2(&zs,-15);
        if(r==0){
            r = inflate(&zs,Z_SYNC_FLUSH);
            xmlstream << "<disposition bytes='" << zs.total_out << "'>decompressed</disposition></zipinfo>";
            zip_recorder->write(pos0+pos,name,xmlstream.str());

            /* Ignore the error return; process data if we got anything */
            if(zs.total_out>0){
                const pos0_t pos0_zip = (pos0 + pos) + rcb.partName;
                const sbuf_t sbuf_new(pos0_zip, dbuf.buf,zs.total_out,zs.total_out,false); // sbuf w/ decompressed data
                scanner_params spnew(sp,sbuf_new); // scanner_params that points to the sbuf
                (*rcb.callback)(spnew);            // process the sbuf

                /* If we are carving, then carve; change '/' to
                 * underbars in the filename.
                 */
                if(unzip_recorder){
                    std::string carve_name("_"); // begin with a _
                    for(std::string::const_iterator it = name.begin(); it!=name.end(); it++){
                        carve_name.push_back(*it=='/' ? '_' : *it);
                    }
                    std::string fn = unzip_recorder->carve(sbuf_new,0,sbuf_new.bufsize,carve_name,hasher);
                    unzip_recorder->set_carve_mtime(fn,mtime);
                }
            }
            r = inflateEnd(&zs);
        } else {
            xmlstream << "<disposition>decompress-failed</disposition></zipinfo>";
            zip_recorder->write(pos0+pos,name,xmlstream.str());
        }
    }
}
static int ah_put(ostream &os, AHRecord const &rec0)
{
    // write header+data to an ostream
    AHRecord rec(rec0); // XXX for const'ness

    // write header *********************************
    int status=AH_SUCCESS, ndata, dtype;
    {
        char hbuf[HEADER_SIZE];
        XDR xdrs;
        xdrmem_create(&xdrs, hbuf, HEADER_SIZE, XDR_ENCODE);
        if ( ! xdr_Header(&xdrs, &rec, ndata, dtype)) // sets ndata, dtype
            status = AH_ERROR;
        if (status != AH_SUCCESS)
            return AH_ERROR;
        os.write(hbuf, HEADER_SIZE);
        if (os.fail())
            return AH_ERROR;
        xdr_destroy(&xdrs);
    }

    // write data samples ***************************
    XDR xdrs;
    int bufsize = buflen(ndata, dtype);

    // write data to an ostream
    vector<char> dbuf(bufsize+100);
    char *buf = &dbuf[0];

    xdrmem_create(&xdrs, buf, bufsize, XDR_ENCODE);

    int n = ndata;
    switch (dtype) {
    case AH_DATATYPE_FLOAT:
        {
# ifdef IEEE_INTEL
            char *b2 = (char*) xdrs.x_private;
# endif
            Array *arr = rec.data();
            if (arr==0)
                cerr << "THIS MUST NEVER HAPPEN: arr==0\n";
            float *f = (float*)(arr->data());
# ifdef IEEE_SPARC
            assert(sizeof(float)==4);
            memcpy((void*) xdrs.x_private, (void*)f, n*sizeof(float));
# else
            while (n--) {
# ifdef IEEE_INTEL
                char *b1 = (char*)(++f);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
# else
                if ( ! xdr_float(&xdrs, f++)) {
                    cerr << "error while writing data" << endl;
                    status = AH_ERROR;
                    break;
                }
# endif
            }
# endif
        }
        break;

    case AH_DATATYPE_COMPLEX:
        /*
        {
            float re, im;
            Complex *c = (Complex*) rec.xdata;
            for (i=0; i<n; i++) {
                re = real(c[i]);
                im = imag(c[i]);
                if ( ! xdr_float(&xdrs, &re) || ! xdr_float(&xdrs, &im)) {
                    ah_error(ahERR_IO_WR, "error while writing data");
                    status = AH_ERROR;
                    break;
                }
            }
        }
        */
        break;

    default:
        cerr << "ah_error_illegal_data_type YY" << endl;
        break;
    }

    xdr_destroy(&xdrs);

    os.write(buf, bufsize);
    if (os.fail())
        status = AH_ERROR;

    return status;
}
static int ah_get(istream &is, AHRecord &rec)
{
    // read header+data from an istream
    int ndata, dtype;

    // read header *********************************
    char hbuf[HEADER_SIZE];
    XDR xdrs;

    // read a header from an istream
    is.read(hbuf, HEADER_SIZE);
    if (is.eof()) {
        // no bytes read -> proper EOF
        // otherwise     -> unexpected EOF
        return is.gcount()==0 ? AH_SUCCESS : AH_ERROR;
    }
    if (is.fail()) {
        cerr << "failed to read header" << endl;
        return AH_ERROR;
    }

    xdrmem_create(&xdrs, hbuf, (unsigned int) HEADER_SIZE, XDR_DECODE);

    int status = AH_SUCCESS;

    // read a header from an XDR stream
    if ( ! xdr_Header(&xdrs, &rec, ndata, dtype))
        status = AH_ERROR;

    // check data type -> XXX do this inside xdr_Header
    // if ( ! is_valid_data_type(rec->type))
    //     status = AH_ERROR;

    if (status == AH_SUCCESS && is.eof()) // XXX XXX XXX XXX XXX XXX XXX
        return AH_SUCCESS; // proper EOF

    // read data samples ***************************
    size_t bufsize = buflen(ndata, dtype);
    vector<char> dbuf(bufsize);
    char *buf = &dbuf[0];

    is.read(buf, bufsize);
    // While reading data we shall never reach EOF
    if (is.eof())  return AH_ERROR; // unexpected EOF
    if (is.fail()) return AH_ERROR; // <- obsolete ???

    xdrmem_create(&xdrs, buf, bufsize, XDR_DECODE);

    int n = ndata;
    switch (dtype) {
    case AH_DATATYPE_FLOAT:
        {
            // allocate new data
            FloatArray *arr = new FloatArray(n);
            rec.setData(arr);
            float *f = (float*)(arr->data());
# ifdef IEEE_INTEL
            float *f0 = (float*) xdrs.x_private;
            char  *b2 = (char*) f;
# endif
            // read data
# ifdef IEEE_SPARC
            assert(sizeof(float)==4);
            // copy without swapping
            memcpy(f, (void*) xdrs.x_private, n*sizeof(float));
# else
            while (n--) {
# ifdef IEEE_INTEL
                char *b1 = (char*)(++f0);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
                *(b2++) = *(--b1);
# else
                if ( ! xdr_float(&xdrs, f++))
                    cerr << "error while reading data" << endl;
# endif
            }
# endif
        }
        break;

    case AH_DATATYPE_COMPLEX:
        /*
        {
            float re, im;

            // allocate new data
            Complex *c = new Complex [n];
            rec->xdata = (void*) c;

            // read data
            for (int i=0; i<n; i++, c++) {
                re = im = 0.0;
                if ( ! xdr_float(&xdrs, &re) || ! xdr_float(&xdrs, &im))
                    ah_error(1, "error while reading data");
                *c = Complex(re, im);
            }
        }
        */
        break;

    default:
        cerr << "ah_error_illegal_data_type XY" << endl;
        break;
    }

    xdr_destroy(&xdrs);

    return status;
}
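// The IEEE_INTEL branches in ah_put()/ah_get() hand-roll the big-endian
// conversion that xdr_float() would otherwise perform, by reversing the four
// bytes of each IEEE-754 float. A standalone sketch of that swap (a
// hypothetical helper, not part of the original code) makes the trick easier
// to see; on a big-endian host (the IEEE_SPARC case) no swap is needed, which
// is why that branch is a plain memcpy.
#include <cstring>

static void float_to_xdr_bytes(float value, unsigned char out[4])
{
    unsigned char tmp[4];
    static_assert(sizeof(float) == 4, "AH assumes 4-byte IEEE-754 floats");
    std::memcpy(tmp, &value, 4);   // native (little-endian) byte representation
    out[0] = tmp[3];               // reverse into XDR / big-endian order
    out[1] = tmp[2];
    out[2] = tmp[1];
    out[3] = tmp[0];
}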