bool OdtWriter::write(QIODevice* device, const QTextDocument* document)
{
	// Flat ("FODT") output is plain uncompressed XML; otherwise the
	// document is written as a zipped ODT package.
	return m_flat ? writeUncompressed(device, document)
	              : writeCompressed(device, document);
}
off_t Compress::copy(int readFd, off_t writeOffset, int writeFd, LayerMap& writeLm) { boost::scoped_array<char> buf(new char[g_BufferedMemorySize]); ssize_t bytes; // Start reading from the begining of the file. off_t readOffset = 0; while ((bytes = readCompressed(buf.get(), g_BufferedMemorySize, readOffset, readFd)) > 0) { writeOffset = writeCompressed(writeLm, readOffset, writeOffset, buf.get(), bytes, writeFd, writeOffset); if (writeOffset == -1) return -1; assert (writeOffset == bytes); readOffset += bytes; } return writeOffset; }
/**
 * Compress an image block by block: each BLOCK_SIZE x BLOCK_SIZE tile is
 * DCT-transformed, quantized and vectorized, then the result is written
 * to args.outFilename.
 *
 * On allocation failure the function releases whatever it did allocate
 * and returns without writing an output file.
 *
 * NOTE(review): assumes img.h and img.w are multiples of BLOCK_SIZE —
 * partial edge tiles are not handled here; confirm against callers.
 */
void compressImg(image img, image output, s_args args)
{
	int   *quantizeData  = malloc(sizeof(int)   * BLOCK_SIZE * BLOCK_SIZE);
	int   *vectorizeData = malloc(sizeof(int)   * BLOCK_SIZE * BLOCK_SIZE);
	float *dctData       = malloc(sizeof(float) * BLOCK_SIZE * BLOCK_SIZE);

	// The helpers below dereference these buffers unconditionally, so a
	// failed allocation must be caught here. free(NULL) is a no-op.
	if (quantizeData == NULL || vectorizeData == NULL || dctData == NULL) {
		free(quantizeData);
		free(vectorizeData);
		free(dctData);
		return;
	}

	int offset = 0;

	for (int i = 0; i < img.h; i += BLOCK_SIZE) {
		for (int j = 0; j < img.w; j += BLOCK_SIZE) {
			dct(&img, dctData, j, i);
			quantize(dctData, quantizeData);
			vectorize(quantizeData, vectorizeData);
			putCompressedValues(&output, vectorizeData, &offset);
		}
	}

	writeCompressed(args.outFilename, &output);

	free(quantizeData);
	free(vectorizeData);
	free(dctData);
}
off_t Compress::cleverCopy(int readFd, off_t writeOffset, int writeFd, LayerMap& writeLm) { off_t offset = 0; off_t size = m_fh.size; Block block; off_t len; while (size > 0) { if (!m_lm.Get(offset, block, len)) { // Block not found. There also is no block on a upper // offset. // break; } if (len) { // Block covers the offset, we can read len bytes // from it's de-compressed stream... try { boost::scoped_array<char> buf(new char[block.length]); // Read old block (or part of it we need)... off_t r = readBlock(readFd, block, size, len, offset, buf.get()); // Write new block... writeOffset = writeCompressed(writeLm, offset, writeOffset, buf.get(), r, writeFd, writeOffset); if (writeOffset == -1) return -1; assert (r == writeOffset); offset += r; size -= r; } catch (...) { rError("%s: Block read failed: block.offset:%lx, block.coffset:%lx, block.length: %lx, block.clength: %lx", __PRETTY_FUNCTION__, (long int) block.offset, (long int) block.coffset, (long int) block.length, (long int) block.clength); return -1; } } else { off_t r; // Block doesn't exists on the offset, but there is // a Block on the bigger offset. r = min(block.offset - offset, (off_t) (size)); offset += r; size -= r; } } return writeOffset; }
ssize_t Compress::write(const char *buf, size_t size, off_t offset) { // Spurious call to write when file has not been opened // happened during testing... if (m_fd == -1) { rWarning("Compress::write Spurios call detected!"); errno = -EBADF; return -1; } assert (m_fd != -1); rDebug("Compress::write size: 0x%lx, offset: 0x%lx", (long int) size, (long int) offset); // We have an oppourtunity to decide whether we really // want to compress the file. We use file magic library // to detect mime type of the file to decide the compress // strategy. if ((m_IsCompressed == true) && (offset == 0) && (m_RawFileSize == FileHeader::MaxSize) && (g_CompressedMagic.isNativelyCompressed(buf, size))) { m_IsCompressed = false; } if (m_IsCompressed == false) { return pwrite(m_fd, buf, size, offset); } else { // If we write data containing only zeros to the end of the file, // we can just increase size of the file. No need to really // compress and write buffer of zeros... if ((m_fh.size == offset) && FileUtils::isZeroOnly(buf, size)) { assert(size > 0); m_fh.size = offset + size; } else { off_t rawFileSize = writeCompressed(m_lm, offset, m_RawFileSize, buf, size, m_fd, m_RawFileSize); if (rawFileSize == -1) return -1; m_RawFileSize = rawFileSize; assert(size > 0); m_fh.size = max(m_fh.size, (off_t) (offset + size)); // Defragment the file only if raw file size if bigger than 4096 bytes // and raw file size is about 20% bigger than it would be uncompressed. if (m_RawFileSize > 4096 && m_RawFileSize > m_fh.size + ((m_fh.size * 2) / 10)) { DefragmentFast(); } } return size; } }