void DatFile::listDir(uint32_t position, vector<uint32_t>& result) const {
    vector<uint8_t> nodeData = readBlocks(position, sizeof(BTNode));
    BTNode* node = reinterpret_cast<BTNode*>(nodeData.data());

    if(node->numEntries > kMaxEntries) {
        throw runtime_error("Node has bad entry count");
    }

    for(uint32_t i = 0; i < node->numEntries; i++) {
        // nextNode[0] == 0 marks a leaf; an internal node has a child
        // to the left of every entry
        if(node->nextNode[0] != 0) {
            listDir(node->nextNode[i], result);
        }
        result.push_back(node->entries[i].id);
    }

    // descend into the rightmost child of an internal node
    if(node->nextNode[0] != 0) {
        listDir(node->nextNode[node->numEntries], result);
    }
}
AvatarAsset::AvatarAsset(FileIO *io) :
    io(io), ioPassedIn(true)
{
    metadata.gender = (AssetGender)0;
    customColors.entries = NULL;
    customColors.count = 0;
    animation.frameCount = 0;   // keep both constructors initialized consistently

    readHeader();
    readBlocks();
}
void VolumeFileManager::startBlockInterpolation()
{
    if (m_block)
        delete [] m_block;

    int bps = m_width*m_height*m_bytesPerVoxel;   // bytes per slice
    m_block = new uchar[m_blockSlices*bps];

    readBlocks(0);
}
AvatarAsset::AvatarAsset(string assetPath) :
    ioPassedIn(false)
{
    metadata.gender = (AssetGender)0;
    customColors.entries = NULL;
    customColors.count = 0;
    animation.frameCount = 0;

    io = new FileIO(assetPath);

    readHeader();
    readBlocks();
}
vector<uint8_t> DatFile::read(uint32_t id) const {
    uint32_t position = rootPosition_;

    // walk the B-tree from the root until the id is found or a leaf is exhausted
    for(;;) {
        vector<uint8_t> nodeData = readBlocks(position, sizeof(BTNode));
        BTNode* node = reinterpret_cast<BTNode*>(nodeData.data());

        if(node->numEntries > kMaxEntries) {
            throw runtime_error("Node has bad entry count");
        }

        // find the first entry whose id is >= the requested id
        uint32_t i = 0;
        for(; i < node->numEntries; i++) {
            if(id <= node->entries[i].id) {
                break;
            }
        }

        if(i < node->numEntries && id == node->entries[i].id) {
            return readBlocks(node->entries[i].position, node->entries[i].size);
        }

        // nextNode[0] == 0 marks a leaf: the id is not present
        if(node->nextNode[0] == 0) {
            return vector<uint8_t>();
        }

        position = node->nextNode[i];
    }
}
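For orientation, a minimal caller sketch tying the two DatFile routines above together. Only listDir and read themselves appear in this listing; the public visibility of listDir and the root-position argument passed to it are assumptions made for illustration.

// Hypothetical usage sketch -- not part of the original source.
#include <cstdint>
#include <iostream>
#include <vector>

void dumpAllEntries(const DatFile& dat, uint32_t rootPosition) {
    std::vector<uint32_t> ids;
    dat.listDir(rootPosition, ids);                   // in-order walk of the B-tree
    for (uint32_t id : ids) {
        std::vector<uint8_t> payload = dat.read(id);  // point lookup by id
        std::cout << "entry " << id << ": " << payload.size() << " bytes\n";
    }
}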
RideFile *TacxCafFileReader::openRideFile(QFile &file, QStringList &errors, QList<RideFile*>*) const
{
    if (!file.open(QFile::ReadOnly)) {
        errors << ("Could not open ride file: \"" + file.fileName() + "\"");
        return NULL;
    }

    QByteArray bytes = file.readAll();
    file.close();

    // a version of 0 signals a header that failed to parse
    qint16 version = readHeaderBlock(bytes.left(TACX_HEADER_BLOCK_SIZE), errors);
    if (version == 0)
        return NULL;

    return readBlocks(bytes.mid(TACX_HEADER_BLOCK_SIZE), version, errors);
}
RunLengthBitVectorStream(std::istream & rin, uint64_t const rbaseoffset = 0)
: in(rin),
  blocksize(readBlockSize(in)),
  n(readN(in)),
  indexpos(readIndexPos(in)),
  blocks(readBlocks(in,indexpos)),
  pointerarrayoffset(indexpos + 1*sizeof(uint64_t)),
  baseoffset(rbaseoffset),
  rankaccbits(libmaus::rank::RunLengthBitVectorBase::getRankAccBits())
{
#if 0
    std::cerr << "indexpos=" << indexpos << std::endl;
    std::cerr << "blocksize=" << blocksize << std::endl;
    std::cerr << "blocks=" << blocks << std::endl;
    std::cerr << "n=" << n << std::endl;
#endif
}
uchar* VolumeFileManager::blockInterpolatedRawValue(float dv, float wv, float hv)
{
    // integer corner coordinates and fractional offsets for trilinear interpolation
    int d = dv;
    int w = wv;
    int h = hv;
    int d1 = d+1;
    int w1 = w+1;
    int h1 = h+1;
    float dd = dv-d;
    float ww = wv-w;
    float hh = hv-h;

    int bps = m_width*m_height*m_bytesPerVoxel;   // bytes per slice

    if (!m_slice)
        m_slice = new uchar[bps];

    // at most we will be reading an 8 byte value
    // initialize first 8 bytes to 0
    memset(m_slice, 0, 8);

    // return zero outside the volume
    if (d < 0 || d1 >= m_depth ||
        w < 0 || w1 >= m_width ||
        h < 0 || h1 >= m_height)
        return m_slice;

    // the 8 corners of the cell containing (dv, wv, hv)
    int da[8], wa[8], ha[8];
    da[0]=d;  wa[0]=w;  ha[0]=h;
    da[1]=d;  wa[1]=w;  ha[1]=h1;
    da[2]=d;  wa[2]=w1; ha[2]=h;
    da[3]=d;  wa[3]=w1; ha[3]=h1;
    da[4]=d1; wa[4]=w;  ha[4]=h;
    da[5]=d1; wa[5]=w;  ha[5]=h1;
    da[6]=d1; wa[6]=w1; ha[6]=h;
    da[7]=d1; wa[7]=w1; ha[7]=h1;

    uchar *rv = new uchar[8*m_bytesPerVoxel];

    if (!m_block)
        readBlocks(da[0]);

    // gather the raw value at each corner, paging in blocks as needed
    for(int i=0; i<8; i++)
    {
        if (da[i] < m_startBlock || da[i] >= m_endBlock)
            readBlocks(da[i]);

        memcpy((char*)rv + i*m_bytesPerVoxel,
               m_block + (da[i]-m_startBlock)*bps +
                         (wa[i]*m_height + ha[i])*m_bytesPerVoxel,
               m_bytesPerVoxel);
    }

    // blend the 8 samples according to the voxel type
    if (m_voxelType == _UChar)       { interpVal(uchar); }
    else if (m_voxelType == _Char)   { interpVal(char); }
    else if (m_voxelType == _UShort) { interpVal(ushort); }
    else if (m_voxelType == _Short)  { interpVal(short); }
    else if (m_voxelType == _Int)    { interpVal(int); }
    else if (m_voxelType == _Float)  { interpVal(float); }

    delete [] rv;
    return m_slice;
}
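interpVal is a macro that is not part of this listing. For illustration, a plausible definition, assuming it performs a standard trilinear blend of the 8 corner samples gathered in rv and writes the result into m_slice; the real macro may differ.

// Sketch of an interpVal(T)-style macro (assumption, not the original):
// weights follow the corner ordering of da/wa/ha above, where index bit 2
// selects d1, bit 1 selects w1, and bit 0 selects h1.
#define interpVal(T)                                              \
  {                                                               \
    T *v = (T*)rv;                                                \
    float val =                                                   \
      (1-dd)*(1-ww)*(1-hh)*v[0] + (1-dd)*(1-ww)*hh*v[1] +         \
      (1-dd)*ww*(1-hh)*v[2]     + (1-dd)*ww*hh*v[3] +             \
      dd*(1-ww)*(1-hh)*v[4]     + dd*(1-ww)*hh*v[5] +             \
      dd*ww*(1-hh)*v[6]         + dd*ww*hh*v[7];                  \
    *(T*)m_slice = (T)val;                                        \
  }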
/*
 * infile: input filename
 * size: size in blocks of input file
 * outfile: output filename
 * field: which field will be used for sorting
 * buffer: the buffer that is used
 * memSize: number of buffer blocks available for use, not counting the last
 *          one, which is reserved for output
 * nunique: number of unique values
 * nios: number of I/Os
 *
 * When the input file fits in the buffer with a block left over for output,
 * hashes each record and writes it to the output, unless a record with the
 * same value is already present in the corresponding bucket.
 */
void hashElimination(char *infile, uint size, char *outfile, unsigned char field,
                     block_t *buffer, uint memSize, uint *nunique, uint *nios) {
    int out = open(outfile, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
    block_t *bufferOut = buffer + memSize;
    emptyBlock(bufferOut);
    (*bufferOut).valid = true;
    (*bufferOut).blockid = 0;
    (*nunique) = 0;
    (*nios) += readBlocks(infile, buffer, size);

    // creates a hash index. for each value returned from the hash function,
    // there is a linked list of pointers to the records with that specific
    // hash value
    uint hashSize = size*MAX_RECORDS_PER_BLOCK;
    linkedRecordPtr **hashIndex =
            (linkedRecordPtr**) malloc(hashSize * sizeof (linkedRecordPtr*));
    for (uint i = 0; i < hashSize; i++) {
        hashIndex[i] = NULL;
    }

    recordPtr start = newPtr(0);
    recordPtr end = newPtr(size * MAX_RECORDS_PER_BLOCK - 1);
    for (; start <= end; incr(start)) {
        if (!buffer[start.block].valid) {
            start.record = MAX_RECORDS_PER_BLOCK - 1;
            continue;
        }
        record_t record = getRecord(buffer, start);
        if (record.valid) {
            // hashes the record being examined
            uint index = hashRecord(infile, record, hashSize, field);
            linkedRecordPtr *element = hashIndex[index];
            // goes through the linked list for the hash value of the record.
            // if a record with the same value is not found, a recordPtr is
            // added to the linked list and the record itself is written to
            // the output; otherwise it is ignored.
            while (element) {
                if (compareRecords(record, getRecord(buffer, element->ptr),
                                   field) == 0) {
                    break;
                }
                element = element->next;
            }
            if (!element) {
                element = (linkedRecordPtr*) malloc(sizeof (linkedRecordPtr));
                element->ptr = start;
                element->next = hashIndex[index];
                hashIndex[index] = element;
                (*bufferOut).entries[(*bufferOut).nreserved++] = record;
                (*nunique) += 1;
                if ((*bufferOut).nreserved == MAX_RECORDS_PER_BLOCK) {
                    (*nios) += writeBlocks(out, bufferOut, 1);
                    emptyBlock(bufferOut);
                    (*bufferOut).blockid += 1;
                }
            }
        }
    }

    // writes any records left in the output block to the outfile
    if ((*bufferOut).nreserved != 0) {
        (*nios) += writeBlocks(out, bufferOut, 1);
    }
    destroyHashIndex(hashIndex, size);
    close(out);
}
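destroyHashIndex is called above but not shown in this listing. A minimal sketch of what it plausibly does, assuming it mirrors the allocation in hashElimination; the size*MAX_RECORDS_PER_BLOCK bucket count is an assumption carried over from there.

/* Sketch of a matching destroyHashIndex (assumed, not the original):
 * walks every bucket chain, frees the linkedRecordPtr nodes, then the table. */
void destroyHashIndex(linkedRecordPtr **hashIndex, uint size) {
    uint hashSize = size * MAX_RECORDS_PER_BLOCK;
    for (uint i = 0; i < hashSize; i++) {
        linkedRecordPtr *element = hashIndex[i];
        while (element) {
            linkedRecordPtr *next = element->next;
            free(element);
            element = next;
        }
    }
    free(hashIndex);
}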
void EliminateDuplicates(char *infile, unsigned char field, block_t *buffer,
                         unsigned int nmem_blocks, char *outfile,
                         unsigned int *nunique, unsigned int *nios) {
    if (nmem_blocks < 3) {
        printf("At least 3 blocks are required.\n");
        return;
    }

    // empties the buffer
    emptyBuffer(buffer, nmem_blocks);
    uint memSize = nmem_blocks - 1;
    *nunique = 0;
    *nios = 0;
    uint fileSize = getSize(infile);

    if (fileSize <= memSize) {
        // the relation fits in the buffer with one block free for output:
        // load it and eliminate duplicates by hashing
        hashElimination(infile, fileSize, outfile, field, buffer, memSize,
                        nunique, nios);
    } else if (fileSize == nmem_blocks) {
        // the relation exactly fills the buffer: reuse the first block as output
        useFirstBlock(infile, outfile, field, buffer, nmem_blocks, nunique, nios);
    } else {
        // the relation is larger than the buffer: sort it with mergesort,
        // BUT during the final merge (last pass) write each value to the
        // output only once. the following code is similar to MergeSort:
        int input, output;
        char tmpFile1[] = ".ed1";
        char tmpFile2[] = ".ed2";
        uint fullSegments = fileSize / nmem_blocks;
        uint remainingSegment = fileSize % nmem_blocks;

        input = open(infile, O_RDONLY, S_IRWXU);
        output = open(tmpFile1, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);

        // pass 0: cut the file into buffer-sized segments, sort each one,
        // and write the sorted runs to the first temporary file
        uint nSortedSegs = 0;
        uint segmentSize = nmem_blocks;
        for (uint i = 0; i <= fullSegments; i++) {
            if (fullSegments == i) {
                if (remainingSegment != 0) {
                    segmentSize = remainingSegment;
                } else {
                    break;
                }
            }
            (*nios) += readBlocks(input, buffer, segmentSize);
            if (sortBuffer(buffer, segmentSize, field)) {
                (*nios) += writeBlocks(output, buffer, segmentSize);
                nSortedSegs += 1;
            }
        }
        close(input);
        close(output);

        segmentSize = nmem_blocks;
        uint lastSegmentSize;
        if (remainingSegment == 0) {
            lastSegmentSize = nmem_blocks;
        } else {
            lastSegmentSize = remainingSegment;
        }
        buffer[memSize].valid = true;

        // merge passes: fan in up to memSize runs at a time, ping-ponging
        // between the two temporary files until a single run remains
        while (nSortedSegs != 1) {
            input = open(tmpFile1, O_RDONLY, S_IRWXU);
            output = open(tmpFile2, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
            uint newSortedSegs = 0;
            uint fullMerges = nSortedSegs / memSize;
            uint lastMergeSegs = nSortedSegs % memSize;
            uint *blocksLeft = (uint*) malloc(memSize * sizeof (uint));
            uint segsToMerge = memSize;
            bool lastMerge = false;
            for (uint mergeCounter = 0; mergeCounter <= fullMerges; mergeCounter++) {
                uint firstSegOffset = mergeCounter * memSize * segmentSize;
                if (mergeCounter == fullMerges - 1 && lastMergeSegs == 0) {
                    lastMerge = true;
                } else if (mergeCounter == fullMerges) {
                    if (lastMergeSegs != 0) {
                        segsToMerge = lastMergeSegs;
                        lastMerge = true;
                    } else {
                        break;
                    }
                }
                // prime each run with its first block
                for (uint i = 0; i < segsToMerge; i++) {
                    (*nios) += preadBlocks(input, buffer + i,
                                           (firstSegOffset + i * segmentSize), 1);
                    blocksLeft[i] = segmentSize - 1;
                }
                if (lastMerge) {
                    blocksLeft[segsToMerge - 1] = lastSegmentSize - 1;
                }
                (*nios) += mergeElimination(input, output, buffer, memSize,
                                            segsToMerge, blocksLeft, segmentSize,
                                            firstSegOffset, field,
                                            nSortedSegs <= memSize, lastMerge,
                                            nunique);
                newSortedSegs += 1;
            }
            free(blocksLeft);

            if (lastMergeSegs == 0) {
                lastSegmentSize = (memSize - 1) * segmentSize + lastSegmentSize;
            } else {
                lastSegmentSize = (lastMergeSegs - 1) * segmentSize + lastSegmentSize;
            }
            segmentSize *= memSize;
            nSortedSegs = newSortedSegs;
            close(input);
            close(output);

            // swap the roles of the two temporary files for the next pass
            char tmp = tmpFile1[3];
            tmpFile1[3] = tmpFile2[3];
            tmpFile2[3] = tmp;
        }
        rename(tmpFile1, outfile);
        remove(tmpFile2);
    }
}
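For intuition about the merge phase above: pass 0 produces ceil(fileSize / nmem_blocks) sorted runs, and each subsequent pass fans in up to memSize = nmem_blocks - 1 of them. A standalone sketch of that arithmetic, for illustration only; it is not part of the original source.

/* Sketch: how many merge passes the loop above performs. */
#include <stdio.h>

unsigned mergePasses(unsigned fileSize, unsigned nmemBlocks) {
    unsigned runs = (fileSize + nmemBlocks - 1) / nmemBlocks; /* initial sorted runs */
    unsigned fanIn = nmemBlocks - 1;   /* one buffer block is reserved for output */
    unsigned passes = 0;
    while (runs > 1) {                 /* each pass merges fanIn runs at a time */
        runs = (runs + fanIn - 1) / fanIn;
        passes++;
    }
    return passes;
}

int main(void) {
    /* e.g. 100 blocks, 10-block buffer: 10 runs, then ceil(10/9)=2, then 1 -> 2 passes */
    printf("%u\n", mergePasses(100, 10));
    return 0;
}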
/*
 * infile: filename of the input file
 * outfile: filename of the output file
 * field: which field will be used for sorting
 * buffer: the buffer used
 * nmem_blocks: size of the buffer
 * nunique: number of unique values
 * nios: number of I/Os
 *
 * When the input file size equals the buffer size, the whole file is loaded
 * and sorted. The first block is then reused as the output block, and only
 * unique values are written to it.
 */
void useFirstBlock(char *infile, char *outfile, unsigned char field,
                   block_t *buffer, uint nmem_blocks, uint *nunique, uint *nios) {
    int out = open(outfile, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
    (*nios) += readBlocks(infile, buffer, nmem_blocks);
    if (sortBuffer(buffer, nmem_blocks, field)) {
        // all the unique values of the first block are shifted to its start;
        // the rest are marked as invalid
        recordPtr i = newPtr(1);
        recordPtr j = newPtr(1);
        (*nunique) += 1;
        buffer[0].nreserved = 1;
        for (; j.block < 1; incr(j)) {
            record_t record = getRecord(buffer, j);
            if (record.valid &&
                compareRecords(record, getRecord(buffer, i - 1), field) != 0) {
                setRecord(buffer, record, i);
                (*nunique) += 1;
                incr(i);
                buffer[0].nreserved += 1;
            }
        }
        j = newPtr(i, 0);
        for (; j.block < 1; incr(j)) {
            buffer[j.block].entries[j.record].valid = false;
        }

        record_t *lastRecordAdded = (record_t*) malloc(sizeof (record_t));
        record_t lastUnique = getRecord(buffer, i - 1);
        memcpy(lastRecordAdded, &lastUnique, sizeof (record_t));

        // if the first block is full after the shifting (meaning all of its
        // values were actually unique), write it to the outfile and empty it
        if (buffer[0].nreserved == MAX_RECORDS_PER_BLOCK) {
            i.block -= 1;
            (*nios) += writeBlocks(out, buffer, 1);
            emptyBlock(buffer);
            buffer[0].blockid += 1;
        }

        // write the unique values of the other blocks to the first one. if it
        // becomes full, write it to the outfile and empty it. at the end, if
        // it holds records not yet written, write them to the outfile as well.
        // (bounds check comes first to avoid reading past the buffer)
        j = newPtr(MAX_RECORDS_PER_BLOCK);
        while (j.block < nmem_blocks && buffer[j.block].valid) {
            record_t record = getRecord(buffer, j);
            if (!record.valid) {
                break;
            }
            if (compareRecords(record, (*lastRecordAdded), field) != 0) {
                setRecord(buffer, record, i);
                memcpy(lastRecordAdded, &record, sizeof (record_t));
                (*nunique) += 1;
                incr(i);
                buffer[0].nreserved += 1;
            }
            if (buffer[0].nreserved == MAX_RECORDS_PER_BLOCK) {
                i.block -= 1;
                (*nios) += writeBlocks(out, buffer, 1);
                emptyBlock(buffer);
                buffer[0].blockid += 1;
            }
            incr(j);
        }
        if (buffer[0].nreserved != 0) {
            (*nios) += writeBlocks(out, buffer, 1);
        }
        free(lastRecordAdded);
    }
    close(out);
}
/**
 * Read a block from disk, directly.
 *
 * There must always be enough space in dest, otherwise an assertion fails.
 *
 * @param index index of block
 * @param dest a piece of memory where the driver reads to
 * @see readBlocks(BlockIndex index, Length count, Slice dest)
 */
virtual void readBlock(BlockIndex index, Slice dest)
{
    readBlocks(index, 1, dest);
}
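A hypothetical caller sketch for the convenience overload above. The BlockDevice class name and the Slice(pointer, length) constructor are assumptions made for illustration; only readBlock and its delegation to readBlocks appear in this listing.

#include <cstddef>
#include <vector>

void loadSuperblock(BlockDevice& dev, std::size_t blockSize) {
    std::vector<char> buf(blockSize);   // dest must hold at least one block
    dev.readBlock(0, Slice(buf.data(), buf.size()));
    // ... parse the superblock out of buf ...
}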