// Reads up to `count` bytes at absolute file position `offset` without
// moving the file cursor. On success `buf` holds exactly the bytes read and
// `count` is updated to the actual number (may be short near EOF). Every
// exit path records its result in _lastError and returns it.
ErrorCode File::pread(ByteVector &buf, uint64_t &count, uint64_t offset) {
  if (!valid()) {
    return _lastError = kErrorInvalidHandle;
  }
  // ::pread takes a signed off_t; reject offsets it cannot represent.
  if (offset > std::numeric_limits<off_t>::max()) {
    return _lastError = kErrorInvalidArgument;
  }
  auto offArg = static_cast<off_t>(offset);
  auto countArg = static_cast<size_t>(count);
  buf.resize(countArg);
  ssize_t nRead = ::pread(_fd, buf.data(), countArg, offArg);
  if (nRead < 0) {
    // Maps the platform errno to the library's ErrorCode.
    return _lastError = Platform::TranslateError();
  }
  buf.resize(nRead);  // shrink to what was actually read (short read / EOF)
  count = static_cast<uint64_t>(nRead);
  return _lastError = kSuccess;
}
// Inflates a zlib-compressed buffer. Returns an empty vector when zlib
// support is compiled out, when initialization fails, or when the
// compressed stream is corrupt.
ByteVector zlib::decompress(const ByteVector &data)
{
#ifdef HAVE_ZLIB
  z_stream stream = {};

  if(inflateInit(&stream) != Z_OK) {
    // BUG FIX: corrected typo in the log message ("initizlize").
    debug("zlib::decompress() - Failed to initialize zlib.");
    return ByteVector();
  }

  // zlib's next_in is not const-qualified, so work on a copy of the input.
  ByteVector inData = data;

  stream.avail_in = static_cast<uInt>(inData.size());
  stream.next_in  = reinterpret_cast<Bytef *>(inData.data());

  const unsigned int chunkSize = 1024;

  ByteVector outData;

  do {
    // Grow the output by one chunk and point zlib at the fresh space.
    const size_t offset = outData.size();
    outData.resize(outData.size() + chunkSize);

    stream.avail_out = static_cast<uInt>(chunkSize);
    stream.next_out  = reinterpret_cast<Bytef *>(outData.data() + offset);

    const int result = inflate(&stream, Z_NO_FLUSH);

    if(result == Z_STREAM_ERROR || result == Z_NEED_DICT ||
       result == Z_DATA_ERROR   || result == Z_MEM_ERROR)
    {
      // Z_STREAM_ERROR means the z_stream state itself is inconsistent,
      // in which case inflateEnd() is not called on it.
      if(result != Z_STREAM_ERROR)
        inflateEnd(&stream);

      debug("zlib::decompress() - Error reading compressed stream.");
      return ByteVector();
    }

    // Trim the part of this chunk zlib did not fill.
    outData.resize(outData.size() - stream.avail_out);
  } while(stream.avail_out == 0); // whole chunk used -> more output may remain

  inflateEnd(&stream);

  return outData;
#else
  return ByteVector();
#endif
}
//============================================================================== void CC_UnitDriver::write_flash_slow(const DataSectionStore §ion_store) { const size_t WRITE_BLOCK_SIZE = reg_info_.write_block_size; // Channel 0: Xdata buffer -> Flash controller uint8_t dma_desc[8] = { HIBYTE(reg_info_.dma_data_offset),// src[15:8] LOBYTE(reg_info_.dma_data_offset),// src[7:0] HIBYTE(reg_info_.fwdata), // dest[15:8] LOBYTE(reg_info_.fwdata), // dest[7:0] HIBYTE(WRITE_BLOCK_SIZE), // block size[15:8] LOBYTE(WRITE_BLOCK_SIZE), // block size[7:0] 18, // trigger FLASH 0x42 // increment source }; // Load dma descriptors write_xdata_memory(reg_info_.dma0_cfg_offset, dma_desc, sizeof(dma_desc)); // Set the pointer to the DMA descriptors write_xdata_memory(reg_info_.dma0_cfgl, LOBYTE(reg_info_.dma0_cfg_offset)); write_xdata_memory(reg_info_.dma0_cfgh, HIBYTE(reg_info_.dma0_cfg_offset)); size_t faddr = 0xFFFF; ByteVector data; section_store.create_image(0xFF, data); data.resize((section_store.upper_address() + (WRITE_BLOCK_SIZE - 1)) & ~(WRITE_BLOCK_SIZE - 1), 0xFF); pw_.write_start(data.size()); ByteVector empty_block; empty_block.resize(WRITE_BLOCK_SIZE, 0xFF); for (size_t i = 0; i < (data.size() / WRITE_BLOCK_SIZE); i++) { pw_.write_progress(WRITE_BLOCK_SIZE); size_t offset = WRITE_BLOCK_SIZE * i; if (!memcmp(&data[offset], &empty_block[0], WRITE_BLOCK_SIZE)) continue; size_t new_faddr = WRITE_BLOCK_SIZE * i / reg_info_.flash_word_size; if (new_faddr != faddr) { faddr = WRITE_BLOCK_SIZE * i / reg_info_.flash_word_size; write_xdata_memory(reg_info_.faddrl, LOBYTE(faddr)); write_xdata_memory(reg_info_.faddrh, HIBYTE(faddr)); faddr += WRITE_BLOCK_SIZE / reg_info_.flash_word_size; } write_xdata_memory(reg_info_.dma_data_offset, &data[offset], WRITE_BLOCK_SIZE); write_xdata_memory(reg_info_.dma_arm, 0x01); write_xdata_memory(reg_info_.fctl, reg_info_.fctl_write); while ((read_xdata_memory(reg_info_.fctl) & 0x80)); } pw_.write_finish(); }
//============================================================================== void CC_UnitDriver::flash_read_32k(size_t address, size_t size, ByteVector &data) { if (((address % 0x8000) + size) > 0x8000) throw std::runtime_error("flash_read_32k, incorrect parameters"); const uint8_t load_dtpr[] = { 0xBE, 0x57, 0x90, address >> 8, address }; usb_device_.bulk_write(ENDPOINT_OUT, sizeof(load_dtpr), load_dtpr); size_t offset = data.size(); data.resize(offset + size, 0xFF); ByteVector command; for (size_t i = 0; i < size / FLASH_READ_CHUNK_SIZE; i++) { if (command.empty()) create_read_proc(FLASH_READ_CHUNK_SIZE, command); usb_device_.bulk_write(ENDPOINT_OUT, command.size(), &command[0]); usb_device_.bulk_read(ENDPOINT_IN, FLASH_READ_CHUNK_SIZE, &data[offset]); offset += FLASH_READ_CHUNK_SIZE; pw_.read_progress(FLASH_READ_CHUNK_SIZE); } if ((size % FLASH_READ_CHUNK_SIZE)) { create_read_proc(size % FLASH_READ_CHUNK_SIZE, command); usb_device_.bulk_write(ENDPOINT_OUT, command.size(), &command[0]); usb_device_.bulk_read(ENDPOINT_IN, size - offset, &data[offset]); pw_.read_progress(size - offset); } }
//============================================================================== void CC_UnitDriver::flash_read_near(uint16_t address, size_t size, ByteVector &data) { const uint8_t load_dtpr[] = { 0xBE, 0x57, 0x90, HIBYTE(address), LOBYTE(address) }; usb_device_.bulk_write(endpoint_out_, sizeof(load_dtpr), load_dtpr); size_t offset = data.size(); data.resize(offset + size, FLASH_EMPTY_BYTE); ByteVector command; for (size_t i = 0; i < size / FLASH_READ_CHUNK_SIZE; i++) { if (command.empty()) create_read_proc(FLASH_READ_CHUNK_SIZE, command); usb_device_.bulk_write(endpoint_out_, command.size(), &command[0]); usb_device_.bulk_read(endpoint_in_, FLASH_READ_CHUNK_SIZE, &data[offset]); offset += FLASH_READ_CHUNK_SIZE; pw_.read_progress(FLASH_READ_CHUNK_SIZE); } if ((size % FLASH_READ_CHUNK_SIZE)) { create_read_proc(size % FLASH_READ_CHUNK_SIZE, command); usb_device_.bulk_write(endpoint_out_, command.size(), &command[0]); usb_device_.bulk_read(endpoint_in_, size - offset, &data[offset]); pw_.read_progress(size - offset); } }
// ------------ reading from the registry ------------ bool Registry::GetValue(HKEY hive, const std::string& path, const std::string& key, ByteVector& value, DWORD& type) { HKEY hk; // doc says it needs a 'class' string, everybody else says class don't exist. if (ERROR_SUCCESS!=RegOpenKeyEx(hive, path.c_str(), 0, KEY_READ, &hk)) { error("GetRegistryString - RegOpenKeyEx(%08lx, %s)", hive, path.c_str()); return false; } DWORD vallen= 0; if (ERROR_SUCCESS!=RegQueryValueEx(hk, key.c_str(), 0, NULL, NULL, &vallen)) { error("RegQueryValueEx"); return false; } value.resize(vallen); if (ERROR_SUCCESS!=RegQueryValueEx(hk, key.c_str(), 0, &type, vectorptr(value), &vallen)) { error("RegQueryValueEx"); return false; } RegCloseKey(hk); return true; }
// Encrypts `data` with `key` via Windows CryptoAPI. The input length must be
// a multiple of the 16-byte block size; the output has the same length as
// the input (final = FALSE means no padding block is produced, and any extra
// bytes the API wrote are stripped at the end). Throws runtime_error on bad
// input length and rethrows WinApiError on non-recoverable API failures.
void CryptoProxy::encrypt(CryptoKey* key, const ByteVector& data, ByteVector& encryptedData)
{
	if ((data.length() % 16) > 0)
		throw runtime_error("encrypt: incorrect data length");

	encryptedData = data; //Crypto encrypts in-place
	// NOTE: do not reserve block for padding, specify final = FALSE instead
	//encryptedData.append(0x00, 16); // add one extra block as MS wants it for padding
	const DWORD bytesToEncrypt = static_cast<DWORD>(data.size());
	DWORD dataSize = bytesToEncrypt;
	try {
		TOE(CryptEncrypt(*key, 0, FALSE/*final*/, 0/*flags*/, &encryptedData.front(), &dataSize, static_cast<DWORD>(encryptedData.size())), "CryptEncrypt 1st");
	}
	catch (WinApiError& e) {
		// larger buffer is required??
		if (e.getErr() == ERROR_MORE_DATA) {
			// dataSize now holds the buffer size the API asked for; grow
			// the buffer by the difference and retry once.
			encryptedData.append(0x00, dataSize - encryptedData.size());
			// try again, no second catch this time
			dataSize = bytesToEncrypt; //set again to input data length
			TOE(CryptEncrypt(*key, 0, FALSE/*final*/, 0/*flags*/, &encryptedData.front(), &dataSize, static_cast<DWORD>(encryptedData.size())), "CryptEncrypt 2nd");
		}
		else
			throw; // another error -> re-throw
	}
	// strip the padding
	encryptedData.resize(bytesToEncrypt);
}
// Finalizes the hash and copies its digest bytes into `value`, sized to
// exactly the length the provider reports.
void CryptoHash::getValue(ByteVector& value)
{
	// First ask for the required buffer size, then fetch the digest itself.
	DWORD hashSize = 0;
	TOE(CryptGetHashParam(handle, HP_HASHVAL, NULL, &hashSize, 0), "CryptGetHashParam");

	value.resize(hashSize);
	TOE(CryptGetHashParam(handle, HP_HASHVAL, &value.front(), &hashSize, 0), "CryptGetHashParam");
}
//============================================================================== void File::read(ByteVector &data, size_t size) { check_open("File::read", file_); data.resize(size); if (fread(&data[0], data.size(), 1, file_) != 1 || ferror(file_)) file_io_error("File::read", file_name_); }
void Monitor::queueNotificationTask(int dwSize) { // We could just swap back and forth between the two // buffers, but this code is easier to understand and debug. ByteVector buf; buf.resize(dwSize); memcpy(&buf[0], &m_Buffer[0], dwSize); server->base->callAsync([=] { server->base->processNotification(path, buf); }); }
// Reads up to min(m_size, limit) bytes from the file, stores the result in
// `value` with the portion after the first NUL dropped and any 0xff bytes
// replaced by spaces, and returns the number of bytes actually read.
uint read(TagLib::File &file, uint limit)
{
  ByteVector data = file.readBlock(std::min(m_size, limit));
  const uint bytesRead = data.size();

  // Truncate at the first NUL terminator, if one is present.
  const int nulPos = data.find((char) 0);
  if(nulPos > -1)
    data.resize(nulPos);

  // 0xff bytes are rendered as spaces.
  data.replace((char) 0xff, ' ');

  value = data;
  return bytesRead;
}
// Serializes a DWORD into a 4-byte registry value, little-endian
// (least significant byte first). Always returns true.
bool Registry::DwordToValue(DWORD dw, ByteVector& value)
{
    value.resize(4);
    for (int i= 0; i < 4; i++) {
        value[i]= (BYTE)(dw >> (8 * i));
    }
    return true;
}
// Decrypts `data` with `key` via Windows CryptoAPI. CryptDecrypt works
// in-place, so the ciphertext is copied into `decryptedData` first; the API
// then reports the actual plaintext length back through dataSize.
void CryptoProxy::decrypt(CryptoKey* key, const ByteVector& data, ByteVector& decryptedData)
{
	decryptedData = data;	//Crypto decrypts in-place
	DWORD dataSize = static_cast<DWORD>(data.size());
	// NOTE: check FINAL again - currently no FINAL to avoid checking padding
	//TOE(CryptDecrypt(*key, 0, TRUE, 0/*flags*/, &decryptedData.front(), &dataSize), "CryptDecrypt");
	TOE(CryptDecrypt(*key, 0, FALSE, 0/*flags*/, &decryptedData.front(), &dataSize), "CryptDecrypt");
	if (dataSize < decryptedData.size()) {
		// BUG FIX: keep exactly dataSize bytes — CryptDecrypt returns the
		// decrypted byte count in dataSize. The previous code resized to
		// size() - dataSize, which keeps the wrong number of bytes whenever
		// the output is shorter than the input (except when it is exactly
		// half).
		decryptedData.resize(dataSize);
	}
}
// Reads `length` bytes and decodes them as a UTF-16LE string, first
// stripping any trailing UTF-16 NUL terminators (two zero bytes each).
String ASF::File::readString(int length)
{
  ByteVector data = readBlock(length);

  unsigned int trimmed = data.size();
  while(trimmed >= 2 && data[trimmed - 1] == '\0' && data[trimmed - 2] == '\0')
    trimmed -= 2;

  if(trimmed != data.size())
    data.resize(trimmed);

  return String(data, String::UTF16LE);
}
// Renders the page's segment table ("lacing values") from the packet sizes.
ByteVector Ogg::PageHeader::lacingValues() const
{
  ByteVector data;

  for(List<int>::ConstIterator it = d->packetSizes.begin();
      it != d->packetSizes.end(); ++it)
  {
    // The size of a packet in an Ogg page is indicated by a series of "lacing
    // values" where the sum of the values is the packet size in bytes. Each of
    // these values is a byte. A value of less than 255 (0xff) indicates the end
    // of the packet.

    // One 0xff lacing value per full 255 bytes of the packet...
    data.resize(data.size() + (*it / 255), '\xff');

    // ...then the remainder (< 255) terminates it. The final packet only
    // gets its terminating value if it is actually complete on this page;
    // an incomplete last packet continues on the next page.
    if(it != --d->packetSizes.end() || d->lastPacketCompleted)
      data.append(static_cast<unsigned char>(*it % 255));
  }

  return data;
}
// Enumerates every value under the already-open key `hkey`, logging each as
// "value <name> = <rendered data>" via debug(). Always returns true.
bool Registry::EnumRegValues(HKEY hkey)
{
    // Fixed 1 KiB scratch buffers for the name and the raw data.
    string name; name.resize(1024);
    ByteVector data; data.resize(1024);

    int i=0;
    while (true)
    {
        // RegEnumValue takes in/out sizes, so reset them from the buffer
        // capacities on every iteration.
        DWORD cbName= (DWORD)name.size();
        DWORD cbData= (DWORD)data.size();
        DWORD type;
        LONG rc= RegEnumValue(hkey, i, stringptr(name), &cbName, NULL, &type, vectorptr(data), &cbData);
        if (rc==ERROR_NO_MORE_ITEMS)
            break;
        // NOTE(review): other error codes (e.g. ERROR_MORE_DATA for values
        // longer than 1024 bytes) are not checked — such entries get logged
        // with whatever is in the buffers. Confirm this is intended.
        debug("value %s = %s\n", name.c_str(), ValueToString(type, data).c_str());
        i++;
    }
    return true;
}
// Finalizes the SD OS image: pads the tail of the image area with a 1 MiB
// block of NUL bytes, writes the accumulated checksum into its own block,
// and closes the underlying SD device. Returns false if not open.
bool SdOsImageWriter::Close()
{
    if (!m_bOpen)
        return false;
    m_bOpen= false;

    // finish by writing NUL's at the end.
    ByteVector nullbuf; nullbuf.resize(0x100000);
    // NOTE(review): 0x1ec0000 appears to be a fixed offset past the
    // 2-block header — confirm against the image layout.
    m_sd->WriteData(2*m_sd->GetBlockSize()+0x1ec0000, nullbuf);

    // write checksum: the raw bytes of m_checksum, zero-padded to a block,
    // stored in block 1.
    ByteVector sumbuf((BYTE*)&m_checksum, (BYTE*)((&m_checksum)+1));
    sumbuf.resize(m_sd->GetBlockSize());
    m_sd->WriteData(m_sd->GetBlockSize(), sumbuf);

    m_sd->Close();
    return true;
}
//============================================================================== void CC_UnitDriver::read_xdata_memory(uint16_t address, size_t count, ByteVector &data) { log_info("programmer, read xdata memory at %04Xh, count: %u", address, count); uint8_t header[] = { 0x40, 0x55, 0x00, 0x72, 0x56, 0xE5, 0x92, 0xBE, 0x57, 0x75, 0x92, 0x00, 0x74, 0x56, 0xE5, 0x83, 0x76, 0x56, 0xE5, 0x82 }; uint8_t footer[] = { 0xD4, 0x57, 0x90, 0xC2, 0x57, 0x75, 0x92, 0x90, 0x56, 0x74 }; uint8_t load_dtpr[] = { 0xBE, 0x57, 0x90, 0x00, 0x00 }; uint8_t mov_a_dtpr[] = { 0x4E, 0x55, 0xE0 }; uint8_t inc_dtpr[] = { 0x5E, 0x55, 0xA3 }; ByteVector command; vector_append(command, header, sizeof(header)); load_dtpr[sizeof(load_dtpr) - 1] = address; load_dtpr[sizeof(load_dtpr) - 2] = address >> 8; vector_append(command, load_dtpr, sizeof(load_dtpr)); for (size_t i = 0; i < count; i++) { if (i == (count - 1) || !((i + 1) % 64)) mov_a_dtpr[0] |= 1; else mov_a_dtpr[0] &= ~1; vector_append(command, mov_a_dtpr, sizeof(mov_a_dtpr)); vector_append(command, inc_dtpr, sizeof(inc_dtpr)); } vector_append(command, footer, sizeof(footer)); data.resize(count); usb_device_.bulk_write(endpoint_out_, command.size(), &command[0]); usb_device_.bulk_read(endpoint_in_, count, &data[0]); log_info("programmer, read xdata memory, data: %s", binary_to_hex(&data[0], count, " ").c_str()); }
// Exercises ByteVector::resize(): growing appends the fill byte, shrinking
// discards data, and a vector that shares (or is offset into) another
// buffer is detached copy-on-write before being modified.
void testResize()
{
  ByteVector a = ByteVector("0123456789");
  ByteVector b = a.mid(3, 4);

  b.resize(6, 'A');
  CPPUNIT_ASSERT_EQUAL(uint(6), b.size());
  CPPUNIT_ASSERT_EQUAL('6', b[3]);
  CPPUNIT_ASSERT_EQUAL('A', b[4]);
  CPPUNIT_ASSERT_EQUAL('A', b[5]);
  b.resize(10, 'B');
  CPPUNIT_ASSERT_EQUAL(uint(10), b.size());
  CPPUNIT_ASSERT_EQUAL('6', b[3]);
  CPPUNIT_ASSERT_EQUAL('B', b[6]);
  CPPUNIT_ASSERT_EQUAL('B', b[9]);
  b.resize(3, 'C');
  CPPUNIT_ASSERT_EQUAL(uint(3), b.size());
  CPPUNIT_ASSERT_EQUAL(-1, b.find('C'));
  b.resize(3);
  CPPUNIT_ASSERT_EQUAL(uint(3), b.size());

  // Check if a and b were properly detached.
  CPPUNIT_ASSERT_EQUAL(uint(10), a.size());
  CPPUNIT_ASSERT_EQUAL('3', a[3]);
  CPPUNIT_ASSERT_EQUAL('5', a[5]);

  // Special case that refCount == 1 and d->offset != 0.
  ByteVector c = ByteVector("0123456789").mid(3, 4);
  c.resize(6, 'A');
  CPPUNIT_ASSERT_EQUAL(uint(6), c.size());
  CPPUNIT_ASSERT_EQUAL('6', c[3]);
  CPPUNIT_ASSERT_EQUAL('A', c[4]);
  CPPUNIT_ASSERT_EQUAL('A', c[5]);
  c.resize(10, 'B');
  CPPUNIT_ASSERT_EQUAL(uint(10), c.size());
  CPPUNIT_ASSERT_EQUAL('6', c[3]);
  CPPUNIT_ASSERT_EQUAL('B', c[6]);
  CPPUNIT_ASSERT_EQUAL('B', c[9]);
  c.resize(3, 'C');
  CPPUNIT_ASSERT_EQUAL(uint(3), c.size());
  CPPUNIT_ASSERT_EQUAL(-1, c.find('C'));
}
///--------------------------------------------------------------------------------- /// ///--------------------------------------------------------------------------------- bool LoadBinaryFileToExistingByteVector( const std::string& filePath, ByteVector& existingVectorBuffer ) { FILE* file; fopen_s( &file, filePath.c_str(), "rb" ); if (!file) { // freak out return false; } size_t neededBufferSize = GetFileLength( file ); // Grow/Shrink existingVectorBuffer.resize( neededBufferSize ); fread( existingVectorBuffer.data(), sizeof( unsigned char ), neededBufferSize, file ); fclose( file ); return true; }
// Reads the whole stream into `vector` (resized to the stream length),
// restores the previous stream position on success, and returns the number
// of bytes actually read (0 if no stream is attached).
size_t IFile::read(ByteVector &vector)
{
	size_t cnt = 0, oldPos, size;
	char *buffer;

	if (stream == NULL)
		return 0;

	// Determine the stream length, then rewind to the start.
	oldPos = ftell(stream);
	fseek(stream, 0, SEEK_END);
	size = ftell(stream);
	fseek(stream, 0, SEEK_SET);

	vector.resize(size);
	buffer = vector.data();

	// BUG FIX: the previous loop never advanced `cnt` or the buffer
	// pointer, so it kept overwriting the start of the buffer and the
	// function always returned 0.
	while (cnt < size && !feof(stream) && !ferror(stream))
		cnt += fread(buffer + cnt, 1, size - cnt, stream);

	if (!ferror(stream))
		fseek(stream, oldPos, SEEK_SET);

	return cnt;
}
void CryptoProxy::genRand(unsigned int reqSize, ByteVector& randData) { randData.resize(reqSize); TOE(CryptGenRandom(impl->provider, reqSize, &randData.front()), "CryptGenRandom"); }
int main(int argc, char** argv) { const uint64_t N_TIMESTAMPS = 1000; const uint64_t N_PARAMS = 100; UncompressedChunk header; std::cout << "Testing timestamp sequence" << std::endl; int c = 100; std::vector<aku_ParamId> ids; for (uint64_t id = 0; id < N_PARAMS; id++) { ids.push_back(id); } RandomWalk rwalk(10.0, 0.0, 0.01, N_PARAMS); for (uint64_t id = 0; id < N_PARAMS; id++) { for (uint64_t ts = 0; ts < N_TIMESTAMPS; ts++) { header.paramids.push_back(ids[id]); int k = rand() % 2; if (k) { c++; } else if (c > 0) { c--; } header.timestamps.push_back((ts + c) << 8); header.values.push_back(rwalk.generate(0)); } } ByteVector out; out.resize(N_PARAMS*N_TIMESTAMPS*24); const size_t UNCOMPRESSED_SIZE = header.paramids.size()*8 // Didn't count lengths and offsets + header.timestamps.size()*8 // because because this arrays contains + header.values.size()*8; // no information and should be compressed // to a few bytes struct Writer : ChunkWriter { ByteVector *out; Writer(ByteVector *out) : out(out) {} virtual aku_MemRange allocate() { aku_MemRange range = { out->data(), static_cast<uint32_t>(out->size()) }; return range; } //! 
Commit changes virtual aku_Status commit(size_t bytes_written) { out->resize(bytes_written); return AKU_SUCCESS; } }; Writer writer(&out); aku_Timestamp tsbegin, tsend; uint32_t n; auto status = CompressionUtil::encode_chunk(&n, &tsbegin, &tsend, &writer, header); if (status != AKU_SUCCESS) { std::cout << "Encoding error" << std::endl; return 1; } // Compress using zlib // Ids copy (zlib need all input data to be aligned because it uses SSE2 internally) Bytef* pgz_ids = (Bytef*)aligned_alloc(64, header.paramids.size()*8); memcpy(pgz_ids, header.paramids.data(), header.paramids.size()*8); // Timestamps copy Bytef* pgz_ts = (Bytef*)aligned_alloc(64, header.timestamps.size()*8); memcpy(pgz_ts, header.timestamps.data(), header.timestamps.size()*8); // Values copy Bytef* pgz_val = (Bytef*)aligned_alloc(64, header.values.size()*8); memcpy(pgz_val, header.values.data(), header.values.size()*8); const auto gz_max_size = N_PARAMS*N_TIMESTAMPS*24; Bytef* pgzout = (Bytef*)aligned_alloc(64, gz_max_size); uLongf gzoutlen = gz_max_size; size_t total_gz_size = 0, id_gz_size = 0, ts_gz_size = 0, float_gz_size = 0; // compress param ids auto zstatus = compress(pgzout, &gzoutlen, pgz_ids, header.paramids.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } total_gz_size += gzoutlen; id_gz_size = gzoutlen; gzoutlen = gz_max_size; // compress timestamps zstatus = compress(pgzout, &gzoutlen, pgz_ts, header.timestamps.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } total_gz_size += gzoutlen; ts_gz_size = gzoutlen; gzoutlen = gz_max_size; // compress floats zstatus = compress(pgzout, &gzoutlen, pgz_val, header.values.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } total_gz_size += gzoutlen; float_gz_size = gzoutlen; const float GZ_BPE = float(total_gz_size)/header.paramids.size(); const float GZ_RATIO = float(UNCOMPRESSED_SIZE)/float(total_gz_size); const size_t 
COMPRESSED_SIZE = out.size(); const float BYTES_PER_EL = float(COMPRESSED_SIZE)/header.paramids.size(); const float COMPRESSION_RATIO = float(UNCOMPRESSED_SIZE)/COMPRESSED_SIZE; std::cout << "Uncompressed: " << UNCOMPRESSED_SIZE << std::endl << " compressed: " << COMPRESSED_SIZE << std::endl << " elements: " << header.paramids.size() << std::endl << " bytes/elem: " << BYTES_PER_EL << std::endl << " ratio: " << COMPRESSION_RATIO << std::endl ; std::cout << "Gzip stats: " << std::endl << "bytes/elem: " << GZ_BPE << std::endl << " ratio: " << GZ_RATIO << std::endl << " id bytes: " << id_gz_size << std::endl << " ts bytes: " << ts_gz_size << std::endl << " val bytes: " << float_gz_size << std::endl; // Try to decompress UncompressedChunk decomp; const unsigned char* pbegin = out.data(); const unsigned char* pend = pbegin + out.size(); CompressionUtil::decode_chunk(&decomp, pbegin, pend, header.timestamps.size()); bool first_error = true; for (auto i = 0u; i < header.timestamps.size(); i++) { if (header.timestamps.at(i) != decomp.timestamps.at(i) && first_error) { std::cout << "Error, bad timestamp at " << i << std::endl; first_error = false; } if (header.paramids.at(i) != decomp.paramids.at(i) && first_error) { std::cout << "Error, bad paramid at " << i << std::endl; first_error = false; } double origvalue = header.values.at(i); double decvalue = decomp.values.at(i); if (origvalue != decvalue && first_error) { std::cout << "Error, bad value at " << i << std::endl; std::cout << "Expected: " << origvalue << std::endl; std::cout << "Actual: " << decvalue << std::endl; first_error = false; } } if (argc == 2 && std::string(argv[1]) == "benchmark") { // Bench compression process const int NRUNS = 1000; PerfTimer tm; aku_Status tstatus; volatile uint32_t vn; ByteVector vec; for (int i = 0; i < NRUNS; i++) { vec.resize(N_PARAMS*N_TIMESTAMPS*24); Writer w(&vec); aku_Timestamp ts; uint32_t n; tstatus = CompressionUtil::encode_chunk(&n, &ts, &ts, &w, header); if (tstatus != 
AKU_SUCCESS) { std::cout << "Encoding error" << std::endl; return 1; } vn = n; } double elapsed = tm.elapsed(); std::cout << "Elapsed (akumuli): " << elapsed << " " << vn << std::endl; tm.restart(); for (int i = 0; i < NRUNS; i++) { uLongf offset = 0; // compress param ids auto zstatus = compress(pgzout, &gzoutlen, pgz_ids, header.paramids.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } offset += gzoutlen; gzoutlen = gz_max_size - offset; // compress timestamps zstatus = compress(pgzout + offset, &gzoutlen, pgz_ts, header.timestamps.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } offset += gzoutlen; gzoutlen = gz_max_size - offset; // compress floats zstatus = compress(pgzout + offset, &gzoutlen, pgz_val, header.values.size()*8); if (zstatus != Z_OK) { std::cout << "GZip error" << std::endl; exit(zstatus); } } elapsed = tm.elapsed(); std::cout << "Elapsed (zlib): " << elapsed << " " << vn << std::endl; } }
//! Commit changes virtual aku_Status commit(size_t bytes_written) { out->resize(bytes_written); return AKU_SUCCESS; }
// Splits a list of packets into one or more Ogg pages. When repagination is
// requested, or the packets cannot fit on a single page (the segment table
// holds at most 255*255 bytes), each packet is cut into ~8 KB pages with
// proper continuation flags; otherwise all packets go onto a single page.
List<Ogg::Page *> Ogg::Page::paginate(const ByteVectorList &packets,
                                      PaginationStrategy strategy,
                                      uint streamSerialNumber,
                                      int firstPage,
                                      bool firstPacketContinued,
                                      bool lastPacketCompleted,
                                      bool containsLastPacket)
{
  List<Page *> l;

  int totalSize = 0;
  for(ByteVectorList::ConstIterator it = packets.begin(); it != packets.end(); ++it)
    totalSize += (*it).size();

  // Handle creation of multiple pages with appropriate pagination.
  if(strategy == Repaginate || totalSize + packets.size() > 255 * 255) {

    // SPLITSIZE must be a multiple of 255 in order to get the lacing values right
    // create pages of about 8KB each
#define SPLITSIZE (32*255)

    int pageIndex = 0;

    for(ByteVectorList::ConstIterator it = packets.begin(); it != packets.end(); ++it) {
      bool continued = false;

      // mark very first packet?
      if(firstPacketContinued && it==packets.begin()) {
        continued = true;
      }

      // append to buf
      ByteVector packetBuf;
      packetBuf.append(*it);

      // Emit full pages of exactly SPLITSIZE bytes while the remaining
      // packet data exceeds one page; every page after the first carries
      // the "continued" flag.
      while(packetBuf.size() > SPLITSIZE) {
        // output a Page
        ByteVector packetForOnePage;
        packetForOnePage.resize(SPLITSIZE);
        std::copy(packetBuf.begin(), packetBuf.begin() + SPLITSIZE, packetForOnePage.begin());
        ByteVectorList packetList;
        packetList.append(packetForOnePage);
        Page *p = new Page(packetList, streamSerialNumber, firstPage+pageIndex, continued, false, false);
        l.append(p);
        pageIndex++;
        continued = true;
        packetBuf = packetBuf.mid(SPLITSIZE);
      }

      ByteVectorList::ConstIterator jt = it;
      ++jt;
      bool lastPacketInList = (jt == packets.end());

      // output a page for the rest (we output one packet per page, so this one should be completed)
      ByteVectorList packetList;
      packetList.append(packetBuf);

      bool isVeryLastPacket = false;
      if(containsLastPacket) {
        // mark the very last output page as last of stream
        ByteVectorList::ConstIterator jt = it;
        ++jt;
        if(jt == packets.end()) {
          isVeryLastPacket = true;
        }
      }

      // Only the final packet of the list inherits lastPacketCompleted;
      // all earlier packets end on their own page and are complete.
      Page *p = new Page(packetList, streamSerialNumber, firstPage+pageIndex, continued,
                         lastPacketInList ? lastPacketCompleted : true,
                         isVeryLastPacket);
      pageIndex++;
      l.append(p);
    }
  }
  else {
    // Everything fits: a single page carrying all packets unchanged.
    Page *p = new Page(packets, streamSerialNumber, firstPage, firstPacketContinued,
                       lastPacketCompleted, containsLastPacket);
    l.append(p);
  }
  return l;
}
// Replaces `replace` bytes at position `start` with `data`. Equal or smaller
// replacements are written directly; a larger replacement shifts the rest of
// the file forward using a leap-frog pair of buffers so nothing is
// overwritten before it has been read.
void File::insert(const ByteVector &data, ulong start, ulong replace)
{
  if(!d->file)
    return;

  if(data.size() == replace) {
    seek(start);
    writeBlock(data);
    return;
  }
  else if(data.size() < replace) {
    seek(start);
    writeBlock(data);
    removeBlock(start + data.size(), replace - data.size());
    return;
  }

  // Woohoo! Faster (about 20%) than id3lib at last. I had to get hardcore
  // and avoid TagLib's high level API for rendering just copying parts of
  // the file that don't contain tag data.
  //
  // Now I'll explain the steps in this ugliness:

  // First, make sure that we're working with a buffer that is longer than
  // the *difference* in the tag sizes. We want to avoid overwriting parts
  // that aren't yet in memory, so this is necessary.

  ulong bufferLength = bufferSize();

  while(data.size() - replace > bufferLength)
    bufferLength += bufferSize();

  // Set where to start the reading and writing.

  long readPosition = start + replace;
  long writePosition = start;

  ByteVector buffer;
  ByteVector aboutToOverwrite(static_cast<uint>(bufferLength));

  // This is basically a special case of the loop below. Here we're just
  // doing the same steps as below, but since we aren't using the same buffer
  // size -- instead we're using the tag size -- this has to be handled as a
  // special case. We're also using File::writeBlock() just for the tag.
  // That's a bit slower than using char *'s so, we're only doing it here.

  seek(readPosition);
  int bytesRead = fread(aboutToOverwrite.data(), sizeof(char), bufferLength, d->file);
  readPosition += bufferLength;

  seek(writePosition);
  writeBlock(data);
  writePosition += data.size();

  buffer = aboutToOverwrite;

  // In case we've already reached the end of file...

  buffer.resize(bytesRead);

  // Ok, here's the main loop. We want to loop until the read fails, which
  // means that we hit the end of the file.

  while(!buffer.isEmpty()) {

    // Seek to the current read position and read the data that we're about
    // to overwrite. Appropriately increment the readPosition.

    seek(readPosition);
    bytesRead = fread(aboutToOverwrite.data(), sizeof(char), bufferLength, d->file);
    aboutToOverwrite.resize(bytesRead);
    readPosition += bufferLength;

    // Check to see if we just read the last block. We need to call clear()
    // if we did so that the last write succeeds.

    if(ulong(bytesRead) < bufferLength)
      clear();

    // Seek to the write position and write our buffer. Increment the
    // writePosition.

    seek(writePosition);
    fwrite(buffer.data(), sizeof(char), buffer.size(), d->file);
    writePosition += buffer.size();

    // Make the current buffer the data that we read in the beginning.

    buffer = aboutToOverwrite;

    // Again, we need this for the last write. We don't want to write garbage
    // at the end of our file, so we need to set the buffer size to the amount
    // that we actually read.

    bufferLength = bytesRead;
  }
}
// Writes the tag back into the Impulse Tracker module: the title goes into
// the fixed header field, comment lines are spread over the instrument and
// sample name slots, and any remaining lines become the song message
// (appended at EOF or overwritten in place, depending on available space).
bool IT::File::save()
{
  if(readOnly()) {
    debug("IT::File::save() - Cannot save to a read only file.");
    return false;
  }
  seek(4);
  writeString(d->tag.title(), 25);
  writeByte(0);
  seek(2, Current);
  ushort length = 0;
  ushort instrumentCount = 0;
  ushort sampleCount = 0;
  if(!readU16L(length) || !readU16L(instrumentCount) || !readU16L(sampleCount))
    return false;
  seek(15, Current);
  // write comment as instrument and sample names:
  StringList lines = d->tag.comment().split("\n");
  for(ushort i = 0; i < instrumentCount; ++ i) {
    // Each table entry after the order list is a 32-bit instrument offset;
    // the name field lives 32 bytes into the instrument header.
    seek(192L + length + ((long)i << 2));
    ulong instrumentOffset = 0;
    if(!readU32L(instrumentOffset))
      return false;

    seek(instrumentOffset + 32);

    if(i < lines.size())
      writeString(lines[i], 25);
    else
      writeString(String::null, 25);
    writeByte(0);
  }
  for(ushort i = 0; i < sampleCount; ++ i) {
    // Sample offsets follow the instrument offsets; the sample name lives
    // 20 bytes into the sample header.
    seek(192L + length + ((long)instrumentCount << 2) + ((long)i << 2));
    ulong sampleOffset = 0;
    if(!readU32L(sampleOffset))
      return false;

    seek(sampleOffset + 20);

    if((TagLib::uint)(i + instrumentCount) < lines.size())
      writeString(lines[i + instrumentCount], 25);
    else
      writeString(String::null, 25);
    writeByte(0);
  }

  // write rest as message:
  StringList messageLines;
  for(uint i = instrumentCount + sampleCount; i < lines.size(); ++ i)
    messageLines.append(lines[i]);
  ByteVector message = messageLines.toString("\r").data(String::Latin1);

  // it's actually not really stated if the message needs a
  // terminating NUL but it does not hurt to add one:
  if(message.size() > 7999)
    message.resize(7999);
  message.append((char)0);

  ushort special = 0;
  ushort messageLength = 0;
  ulong  messageOffset = 0;

  seek(46);
  if(!readU16L(special))
    return false;

  ulong fileSize = File::length();
  if(special & Properties::MessageAttached) {
    seek(54);
    if(!readU16L(messageLength) || !readU32L(messageOffset))
      return false;

    if(messageLength == 0)
      messageOffset = fileSize;
  }
  else {
    messageOffset = fileSize;
    seek(46);
    // Turn on the "message attached" bit in the special flags.
    writeU16L(special | 0x1);
  }

  if(messageOffset + messageLength >= fileSize) {
    // append new message
    seek(54);
    writeU16L(message.size());
    writeU32L(messageOffset);
    seek(messageOffset);
    writeBlock(message);
    truncate(messageOffset + message.size());
  }
  else {
    // Only overwrite existing message.
    // I'd need to parse (understand!) the whole file for more.
    // Although I could just move the message to the end of file
    // and let the existing one be, but that would waste space.
    message.resize(messageLength, 0);
    seek(messageOffset);
    writeBlock(message);
  }
  return true;
}
// Parses an Impulse Tracker module: validates the "IMPM" magic, reads the
// header fields into the audio properties, collects the instrument and
// sample names plus the optional song message into the comment, and counts
// the active channels.
void IT::File::read(bool)
{
  if(!isOpen())
    return;

  seek(0);
  READ_ASSERT(readBlock(4) == "IMPM");
  READ_STRING(d->tag.setTitle, 26);

  seek(2, Current);

  READ_U16L_AS(length);
  READ_U16L_AS(instrumentCount);
  READ_U16L_AS(sampleCount);

  d->properties.setInstrumentCount(instrumentCount);
  d->properties.setSampleCount(sampleCount);
  READ_U16L(d->properties.setPatternCount);
  READ_U16L(d->properties.setVersion);
  READ_U16L(d->properties.setCompatibleVersion);
  READ_U16L(d->properties.setFlags);
  READ_U16L_AS(special);
  d->properties.setSpecial(special);
  READ_BYTE(d->properties.setGlobalVolume);
  READ_BYTE(d->properties.setMixVolume);
  READ_BYTE(d->properties.setBpmSpeed);
  READ_BYTE(d->properties.setTempo);
  READ_BYTE(d->properties.setPanningSeparation);
  READ_BYTE(d->properties.setPitchWheelDepth);

  // IT supports some kind of comment tag. Still, the
  // sample/instrument names are abused as comments so
  // I just add all together.
  String message;
  if(special & Properties::MessageAttached) {
    READ_U16L_AS(messageLength);
    READ_U32L_AS(messageOffset);
    seek(messageOffset);
    ByteVector messageBytes = readBlock(messageLength);
    READ_ASSERT(messageBytes.size() == messageLength);
    // Truncate at the first NUL, then normalize CR line endings to LF.
    int index = messageBytes.find((char) 0);
    if(index > -1)
      messageBytes.resize(index, 0);
    messageBytes.replace('\r', '\n');
    message = messageBytes;
  }

  seek(64);

  ByteVector pannings = readBlock(64);
  ByteVector volumes  = readBlock(64);
  READ_ASSERT(pannings.size() == 64 && volumes.size() == 64);
  int channels = 0;
  for(int i = 0; i < 64; ++ i) {
    // Strictly speaking an IT file has always 64 channels, but
    // I don't count disabled and muted channels.
    // But this always gives 64 channels for all my files anyway.
    // Strangely VLC does report other values. I wonder how VLC
    // gets it's values.
    if((unsigned char) pannings[i] < 128 && volumes[i] > 0)
      ++channels;
  }
  d->properties.setChannels(channels);

  // real length might be shorter because of skips and terminator
  ushort realLength = 0;
  for(ushort i = 0; i < length; ++ i) {
    READ_BYTE_AS(order);
    if(order == 255) break;      // 255 terminates the order list
    if(order != 254) ++ realLength; // 254 is a skip marker, not a pattern
  }
  d->properties.setLengthInPatterns(realLength);

  StringList comment;
  // Note: I found files that have nil characters somewhere
  // in the instrument/sample names and more characters
  // afterwards. The spec does not mention such a case.
  // Currently I just discard anything after a nil, but
  // e.g. VLC seems to interprete a nil as a space. I
  // don't know what is the proper behaviour.
  for(ushort i = 0; i < instrumentCount; ++ i) {
    seek(192L + length + ((long)i << 2));
    READ_U32L_AS(instrumentOffset);
    seek(instrumentOffset);

    ByteVector instrumentMagic = readBlock(4);
    READ_ASSERT(instrumentMagic == "IMPI");

    READ_STRING_AS(dosFileName, 13);

    seek(15, Current);

    READ_STRING_AS(instrumentName, 26);
    comment.append(instrumentName);
  }

  for(ushort i = 0; i < sampleCount; ++ i) {
    seek(192L + length + ((long)instrumentCount << 2) + ((long)i << 2));
    READ_U32L_AS(sampleOffset);

    seek(sampleOffset);

    ByteVector sampleMagic = readBlock(4);
    READ_ASSERT(sampleMagic == "IMPS");

    READ_STRING_AS(dosFileName, 13);
    READ_BYTE_AS(globalVolume);
    READ_BYTE_AS(sampleFlags);
    READ_BYTE_AS(sampleVolume);
    READ_STRING_AS(sampleName, 26);
    /*
    READ_BYTE_AS(sampleCvt);
    READ_BYTE_AS(samplePanning);
    READ_U32L_AS(sampleLength);
    READ_U32L_AS(loopStart);
    READ_U32L_AS(loopStop);
    READ_U32L_AS(c5speed);
    READ_U32L_AS(sustainLoopStart);
    READ_U32L_AS(sustainLoopEnd);
    READ_U32L_AS(sampleDataOffset);
    READ_BYTE_AS(vibratoSpeed);
    READ_BYTE_AS(vibratoDepth);
    READ_BYTE_AS(vibratoRate);
    READ_BYTE_AS(vibratoType);
    */

    comment.append(sampleName);
  }

  if(message.size() > 0)
    comment.append(message);
  d->tag.setComment(comment.toString("\n"));
  d->tag.setTrackerName("Impulse Tracker");
}
// Saves the tags: renders the Xiph (Vorbis) comment into a FLAC metadata
// block, updates the existing comment block in place (reusing or creating
// padding) or inserts a new one after the first block, then rewrites any
// ID3v2/ID3v1 tags present.
bool FLAC::File::save()
{
  if(readOnly()) {
    debug("FLAC::File::save() - Cannot save to a read only file.");
    return false;
  }

  // Create new vorbis comments
  Tag::duplicate(&d->tag, xiphComment(true), true);

  d->xiphCommentData = xiphComment()->render(false);

  // A Xiph comment portion of the data stream starts with a 4-byte descriptor.
  // The first byte indicates the frame type. The last three bytes are used
  // to give the length of the data segment. Here we start

  ByteVector data = ByteVector::fromUInt(d->xiphCommentData.size());
  data[0] = char(VorbisComment);
  data.append(d->xiphCommentData);

  // If file already have comment => find and update it
  // if not => insert one

  // TODO: Search for padding and use that

  if(d->hasXiphComment) {
    // Walk the metadata block chain looking for the Vorbis comment block.
    long nextBlockOffset = d->flacStart;
    bool isLastBlock = false;

    while(!isLastBlock) {
      seek(nextBlockOffset);

      // Block header: 1 flag/type byte + 24-bit big-endian length.
      ByteVector header = readBlock(4);
      char blockType = header[0] & 0x7f;
      isLastBlock = (header[0] & 0x80) != 0;
      uint blockLength = header.mid(1, 3).toUInt();

      if(blockType == VorbisComment) {
        long paddingBreak = 0;

        if(!isLastBlock) {
          paddingBreak = findPaddingBreak(nextBlockOffset + blockLength + 4,
                                          nextBlockOffset + d->xiphCommentData.size() + 8,
                                          &isLastBlock);
        }

        uint paddingLength = 0;

        if(paddingBreak) {
          // There is space for comment and padding blocks without rewriting the
          // whole file. Note: This cannot overflow.
          paddingLength = paddingBreak - (nextBlockOffset + d->xiphCommentData.size() + 8);
        }
        else {
          // Not enough space, so we will have to rewrite the whole file
          // following this block
          paddingLength = d->xiphCommentData.size();

          if(paddingLength < MinPaddingLength)
            paddingLength = MinPaddingLength;

          paddingBreak = nextBlockOffset + blockLength + 4;
        }

        // Build a padding block (type 1) sized to fill the gap, preserving
        // the last-block flag if needed.
        ByteVector padding = ByteVector::fromUInt(paddingLength);

        padding[0] = 1;

        if(isLastBlock)
          padding[0] |= 0x80;

        padding.resize(paddingLength + 4);

        ByteVector pair(data);
        pair.append(padding);

        insert(pair, nextBlockOffset, paddingBreak - nextBlockOffset);
        break;
      }

      nextBlockOffset += blockLength + 4;
    }
  }
  else {
    // No comment block yet: insert one right after the first metadata block.
    const long firstBlockOffset = d->flacStart;
    seek(firstBlockOffset);

    ByteVector header = readBlock(4);
    bool isLastBlock = (header[0] & 0x80) != 0;
    uint blockLength = header.mid(1, 3).toUInt();

    if(isLastBlock) {
      // If the first block was previously also the last block, then we want to
      // mark it as no longer being the first block (the writeBlock() call) and
      // then set the data for the block that we're about to write to mark our
      // new block as the last block.
      seek(firstBlockOffset);
      writeBlock(static_cast<char>(header[0] & 0x7F));
      data[0] |= 0x80;
    }

    insert(data, firstBlockOffset + blockLength + 4, 0);
    d->hasXiphComment = true;
  }

  // Update ID3 tags

  if(ID3v2Tag()) {
    if(d->hasID3v2) {
      if(d->ID3v2Location < d->flacStart)
        debug("FLAC::File::save() -- This can't be right -- an ID3v2 tag after the "
              "start of the FLAC bytestream? Not writing the ID3v2 tag.");
      else
        insert(ID3v2Tag()->render(), d->ID3v2Location, d->ID3v2OriginalSize);
    }
    else
      insert(ID3v2Tag()->render(), 0, 0);
  }

  if(ID3v1Tag()) {
    seek(-128, End);
    writeBlock(ID3v1Tag()->render());
  }

  return true;
}
// Packs the bloom filter bits into `v`, 8 bits per byte, least significant
// bit first within each byte.
void HashBloom::copy_to(ByteVector& v) const {
	// BUG FIX: zero the destination explicitly — resize() alone keeps any
	// pre-existing bytes, and the |= below would then OR the filter bits
	// into stale data. clear() + resize() value-initializes every byte.
	v.clear();
	v.resize(bloom.size() / 8);
	for(size_t i = 0; i < bloom.size(); ++i) {
		v[i/8] |= bloom[i] << (i % 8);
	}
}