//cloned from hthor - a candidate for commoning up. static IKeyIndex *openKeyFile(IDistributedFilePart *keyFile) { unsigned numCopies = keyFile->numCopies(); assertex(numCopies); for (unsigned copy=0; copy < numCopies; copy++) { RemoteFilename rfn; try { OwnedIFile ifile = createIFile(keyFile->getFilename(rfn,copy)); unsigned __int64 thissize = ifile->size(); if (thissize != -1) { StringBuffer remotePath; rfn.getRemotePath(remotePath); unsigned crc = 0; keyFile->getCrc(crc); return createKeyIndex(remotePath.str(), crc, false, false); } } catch (IException *E) { EXCLOG(E, "While opening index file"); E->Release(); } } RemoteFilename rfn; StringBuffer url; keyFile->getFilename(rfn).getRemotePath(url); throw MakeStringException(1001, "Could not open key file at %s%s", url.str(), (numCopies > 1) ? " or any alternate location." : "."); }
// Master-side init for KEYED DISTRIBUTE: resolve the index file, validate it is a
// distributed (TLK-bearing) key, then load the top-level-key part into tlkMb so it
// can be shipped to the slaves (size is appended first, followed by the raw bytes).
virtual void init()
{
    HashDistributeMasterBase::init();

    // JCSMORE should common up some with indexread
    IHThorKeyedDistributeArg *helper = (IHThorKeyedDistributeArg *)queryHelper();

    StringBuffer scoped;
    OwnedRoxieString indexFileName(helper->getIndexFileName());
    queryThorFileManager().addScope(container.queryJob(), indexFileName, scoped);
    file.setown(queryThorFileManager().lookup(container.queryJob(), indexFileName));
    if (!file)
        throw MakeActivityException(this, 0, "KeyedDistribute: Failed to find key: %s", scoped.str());
    if (0 == file->numParts())
        throw MakeActivityException(this, 0, "KeyedDistribute: Can't distribute based on an empty key: %s", scoped.str());
    // ensure the published record layout matches what this workunit was compiled against
    checkFormatCrc(this, file, helper->getFormatCrc(), true);
    Owned<IFileDescriptor> fileDesc = file->getFileDescriptor();
    // the TLK is by convention the last part of a distributed key
    Owned<IPartDescriptor> tlkDesc = fileDesc->getPart(fileDesc->numParts()-1);
    if (!tlkDesc->queryProperties().hasProp("@kind") || 0 != stricmp("topLevelKey", tlkDesc->queryProperties().queryProp("@kind")))
        throw MakeActivityException(this, 0, "Cannot distribute using a non-distributed key: '%s'", scoped.str());
    unsigned location;
    OwnedIFile iFile;
    StringBuffer filePath;
    if (!getBestFilePart(this, *tlkDesc, iFile, location, filePath))
        throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", file->queryLogicalName());
    OwnedIFileIO iFileIO = iFile->open(IFOread);
    assertex(iFileIO);
    // serialize the TLK: size first, then the TLK file contents
    // NOTE(review): size is appended as returned by size() (8 bytes), then
    // truncated to size32_t for the read - presumably TLKs are always <4GB; verify.
    tlkMb.append(iFileIO->size());
    ::read(iFileIO, 0, (size32_t)iFileIO->size(), tlkMb);

    queryThorFileManager().noteFileRead(container.queryJob(), file);
}
void renameDfuTempToFinal(const RemoteFilename & realname) { RemoteFilename tempFilename; StringBuffer newTailname; getDfuTempName(tempFilename, realname); realname.getTail(newTailname); OwnedIFile output = createIFile(tempFilename); try { output->rename(newTailname); } catch (IException * e) { EXCLOG(e, "Failed to rename target file"); StringBuffer oldName; realname.getPath(oldName); LOG(MCdebugInfoDetail, unknownJob, "Error: Rename %s->%s failed - tring to delete target and rename again", oldName.str(), newTailname.str()); e->Release(); OwnedIFile old = createIFile(realname); old->remove(); output->rename(newTailname); } }
extern bool isCompressedIndex(const char *filename) { OwnedIFile file = createIFile(filename); OwnedIFileIO io = file->open(IFOread); unsigned __int64 size = file->size(); if (size) { KeyHdr hdr; if (io->read(0, sizeof(hdr), &hdr) == sizeof(hdr)) { SwapBigEndian(hdr); if (size % hdr.nodeSize == 0 && hdr.phyrec == size-1 && hdr.root && hdr.root % hdr.nodeSize == 0 && hdr.ktype & (HTREE_COMPRESSED_KEY|HTREE_QUICK_COMPRESSED_KEY)) { NodeHdr root; if (io->read(hdr.root, sizeof(root), &root) == sizeof(root)) { SwapBigEndian(root); return root.leftSib==0 && root.rightSib==0; } } } } return false; }
bool ESDLcompiler::locateIncludedFile(StringBuffer& filepath, const char* prot, const char* srcDir, const char* fname, const char* ext) { StringBuffer alternateExtFilename; alternateExtFilename.setf("%s%s%s", (prot && *prot) ? prot : "", srcDir, fname); const char* alt_ext; if (stricmp(ext, LEGACY_FILE_EXTENSION)==0) alt_ext = ESDL_FILE_EXTENSION; else alt_ext = LEGACY_FILE_EXTENSION; alternateExtFilename.append(alt_ext); OwnedIFile fileInSrcDir = createIFile(alternateExtFilename.str()); if (fileInSrcDir->exists()) { filepath.set(alternateExtFilename.str()); return true; } ForEachItemIn(x, includeDirs) { const char* dir = includeDirs.item(x); if (dir && *dir) { StringBuffer pathInInclude(dir); pathInInclude.trim(); if (pathInInclude.charAt(pathInInclude.length() - 1) != PATHSEPCHAR) pathInInclude.append(PATHSEPCHAR); pathInInclude.append(fname); VStringBuffer pathInIncludeFull("%s%s", pathInInclude.str(), ext); OwnedIFile fileInInclude = createIFile(pathInIncludeFull.str()); if (fileInInclude->exists()) { filepath.set(pathInIncludeFull.str()); return true; } pathInIncludeFull.setf("%s%s", pathInInclude.str(), alt_ext); OwnedIFile altFileInInclude = createIFile(pathInIncludeFull.str()); if (altFileInInclude->exists()) { filepath.set(pathInIncludeFull.str()); return true; } } } filepath.clear(); return false; }
// Prepare the per-part lookup mask for an index read over 'index'.
// For each (sub)key: optionally verify that all TLK crcs agree across superfile
// members (required for globally sorted output), and - unless nofilter - probe
// the TLK with the activity's segment monitors to mark which leaf parts can
// possibly match, setting those slots of performPartLookup to true.
void prepareKey(IDistributedFile *index)
{
    IDistributedFile *f = index;
    IDistributedSuperFile *super = f->querySuperFile();

    unsigned nparts = f->numParts(); // includes tlks if any, but unused in array
    performPartLookup.ensure(nparts);

    // TLK consistency only matters for sorted reads over a non-local superfile key
    bool checkTLKConsistency = (nullptr != super) && !localKey && (0 != (TIRsorted & indexBaseHelper->getFlags()));
    if (nofilter)
    {
        // no filtering: every part will be looked up
        while (nparts--) performPartLookup.append(true);
        if (!checkTLKConsistency) return;
    }
    else
    {
        while (nparts--) performPartLookup.append(false); // parts to perform lookup set later
    }

    // iterate sub-keys of a superfile, or just the single key
    Owned<IDistributedFileIterator> iter;
    if (super)
    {
        iter.setown(super->getSubFileIterator(true));
        verifyex(iter->first());
        f = &iter->query();
    }
    // width = number of leaf parts; for a distributed key the last part is the TLK,
    // so getPart(width) below addresses the TLK part (0-based indexing)
    unsigned width = f->numParts();
    if (!localKey)
        --width;
    assertex(width);

    unsigned tlkCrc = 0;
    bool first = true;
    unsigned superSubIndex=0;
    bool fileCrc = false, rowCrc = false;  // which crc flavour(s) have been seen so far
    for (;;)
    {
        Owned<IDistributedFilePart> part = f->getPart(width);
        if (checkTLKConsistency)
        {
            unsigned _tlkCrc;
            if (part->getCrc(_tlkCrc))
                fileCrc = true;
            else if (part->queryAttributes().hasProp("@crc")) // NB: key "@crc" is not a crc on the file, but data within.
            {
                _tlkCrc = part->queryAttributes().getPropInt("@crc");
                rowCrc = true;
            }
            else if (part->queryAttributes().hasProp("@tlkCrc")) // backward compat.
            {
                _tlkCrc = part->queryAttributes().getPropInt("@tlkCrc");
                rowCrc = true;
            }
            else
            {
                // this part has no crc of any kind; if earlier parts did, the
                // comparison is meaningless - warn once and stop checking
                if (rowCrc || fileCrc)
                {
                    checkTLKConsistency = false;
                    Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, some crc attributes are missing", super->queryLogicalName());
                    queryJobChannel().fireException(e);
                }
            }
            // file-level and row-level crcs are not comparable to each other
            if (rowCrc && fileCrc)
            {
                checkTLKConsistency = false;
                Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, due to mixed crc types.", super->queryLogicalName());
                queryJobChannel().fireException(e);
            }
            if (checkTLKConsistency)
            {
                if (first)
                {
                    tlkCrc = _tlkCrc;
                    first = false;
                }
                else if (tlkCrc != _tlkCrc)
                    throw MakeActivityException(this, 0, "Sorted output on super files comprising of non coparitioned sub keys is not supported (TLK's do not match)");
            }
        }
        if (!nofilter)
        {
            // open the first existing copy of the TLK part
            Owned<IKeyIndex> keyIndex;
            unsigned copy;
            for (copy=0; copy<part->numCopies(); copy++)
            {
                RemoteFilename rfn;
                OwnedIFile ifile = createIFile(part->getFilename(rfn,copy));
                if (ifile->exists())
                {
                    StringBuffer remotePath;
                    rfn.getRemotePath(remotePath);
                    unsigned crc = 0;
                    part->getCrc(crc);
                    keyIndex.setown(createKeyIndex(remotePath.str(), crc, false, false));
                    break;
                }
            }
            if (!keyIndex)
                throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", index->queryLogicalName());

            unsigned fixedSize = indexBaseHelper->queryDiskRecordSize()->querySerializedDiskMeta()->getFixedSize(); // used only if fixed
            // apply the activity's filter to the TLK; each matching fpos identifies
            // a leaf part (1-based) that may contain matching rows
            Owned <IKeyManager> tlk = createLocalKeyManager(keyIndex, fixedSize, NULL);
            indexBaseHelper->createSegmentMonitors(tlk);
            tlk->finishSegmentMonitors();
            tlk->reset();
            while (tlk->lookup(false))
            {
                if (tlk->queryFpos())
                    // for a superfile, sub-key part slots are interleaved:
                    // slot = numSubFiles * (partNo-1) + subFileIndex
                    performPartLookup.replace(true, (aindex_t)(super?super->numSubFiles(true)*(tlk->queryFpos()-1)+superSubIndex:tlk->queryFpos()-1));
            }
        }
        if (!super||!iter->next())
            break;
        superSubIndex++;
        f = &iter->query();
        // all sub-keys must have the same leaf width
        // NOTE(review): compares against numParts()-1 unconditionally, i.e. assumes
        // every sub-key carries a TLK - confirm local sub-keys cannot reach here
        if (width != f->numParts()-1)
            throw MakeActivityException(this, 0, "Super key %s, with mixture of sub key width are not supported.", f->queryLogicalName());
    }
}
// Validate the structural integrity of a key (index) file.
// Checks the header (phyrec/size agreement, node alignment, root pointer), the
// root node's sibling pointers, then walks nodes verifying header validity,
// sibling ranges and per-node crc32. If nodePos is non-zero only the single node
// at that offset is checked; otherwise every node after the header is scanned.
// Throws a numbered MakeStringException describing the first defect found.
extern jhtree_decl void validateKeyFile(const char *filename, offset_t nodePos)
{
    OwnedIFile file = createIFile(filename);
    OwnedIFileIO io = file->open(IFOread);
    if (!io)
        throw MakeStringException(1, "Invalid key %s: cannot open file", filename);
    unsigned __int64 size = file->size();
    if (!size)
        throw MakeStringException(2, "Invalid key %s: zero size", filename);
    KeyHdr hdr;
    if (io->read(0, sizeof(hdr), &hdr) != sizeof(hdr))
        throw MakeStringException(4, "Invalid key %s: failed to read key header", filename);
    CKeyHdr keyHdr;
    keyHdr.load(hdr);
    // byte-swap the raw header fields we examine directly below
    // (keyHdr.load above consumed the raw big-endian copy)
    _WINREV(hdr.phyrec);
    _WINREV(hdr.root);
    _WINREV(hdr.nodeSize);
    // phyrec is the offset of the last byte, so it must equal size-1
    if (hdr.phyrec != size-1)
        throw MakeStringException(5, "Invalid key %s: phyrec was %" I64F "d, expected %" I64F "d", filename, hdr.phyrec, size-1);
    if (size % hdr.nodeSize)
        throw MakeStringException(3, "Invalid key %s: size %" I64F "d is not a multiple of key node size (%d)", filename, size, hdr.nodeSize);
    if (!hdr.root || hdr.root % hdr.nodeSize !=0)
        throw MakeStringException(6, "Invalid key %s: invalid root pointer %" I64F "x", filename, hdr.root);
    NodeHdr root;
    if (io->read(hdr.root, sizeof(root), &root) != sizeof(root))
        throw MakeStringException(7, "Invalid key %s: failed to read root node", filename);
    _WINREV(root.rightSib);
    _WINREV(root.leftSib);
    // the root node never has siblings
    if (root.leftSib || root.rightSib)
        throw MakeStringException(8, "Invalid key %s: invalid root node sibling pointers 0x%" I64F "x, 0x%" I64F "x (expected 0,0)", filename, root.leftSib, root.rightSib);

    // nodePos != 0 => validate just that node; otherwise scan every node,
    // starting after the header node at offset hdr.nodeSize
    for (offset_t nodeOffset = (nodePos ? nodePos : hdr.nodeSize); nodeOffset < (nodePos ? nodePos+1 : size); nodeOffset += hdr.nodeSize)
    {
        MemoryAttr ma;
        char *buffer = (char *) ma.allocate(hdr.nodeSize);
        {
            MTIME_SECTION(queryActiveTimer(), "JHTREE read index node");
            // NOTE(review): read length is not checked here, unlike the reads above
            io->read(nodeOffset, hdr.nodeSize, buffer);
        }
        CJHTreeNode theNode;
        {
            MTIME_SECTION(queryActiveTimer(), "JHTREE load index node");
            theNode.load(&keyHdr, buffer, nodeOffset, true);
        }
        // re-examine the raw node header (swapped in place) for range checks
        NodeHdr *nodeHdr = (NodeHdr *) buffer;
        SwapBigEndian(*nodeHdr);
        if (!nodeHdr->isValid(hdr.nodeSize))
            throw MakeStringException(9, "Invalid key %s: invalid node header at position 0x%" I64F "x", filename, nodeOffset);
        if (nodeHdr->leftSib >= size || nodeHdr->rightSib >= size)
            throw MakeStringException(9, "Invalid key %s: out of range sibling pointers 0x%" I64F "x, 0x%" I64F "x at position 0x%" I64F "x", filename, nodeHdr->leftSib, nodeHdr->rightSib, nodeOffset);
        if (nodeHdr->crc32)
        {
            // crc covers the node payload (everything after the node header)
            unsigned crc = crc32(buffer + sizeof(NodeHdr), nodeHdr->keyBytes, 0);
            if (crc != nodeHdr->crc32)
                throw MakeStringException(9, "Invalid key %s: crc mismatch at position 0x%" I64F "x", filename, nodeOffset);
        }
        else
        {
            // MORE - if we felt so inclined, we could decode the node and check records were in ascending order
        }
    }
}
void CWriteMasterBase::publish() { if (published) return; published = true; if (!(diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp))) updateActivityResult(container.queryJob().queryWorkUnit(), diskHelperBase->getFlags(), diskHelperBase->getSequence(), fileName, recordsProcessed); IPropertyTree &props = fileDesc->queryProperties(); props.setPropInt64("@recordCount", recordsProcessed); if (0 == (diskHelperBase->getFlags() & TDXtemporary) || container.queryJob().queryUseCheckpoints()) { if (0 != (diskHelperBase->getFlags() & TDWexpires)) setExpiryTime(props, diskHelperBase->getExpiryDays()); if (TDWupdate & diskHelperBase->getFlags()) { unsigned eclCRC; unsigned __int64 totalCRC; diskHelperBase->getUpdateCRCs(eclCRC, totalCRC); props.setPropInt("@eclCRC", eclCRC); props.setPropInt64("@totalCRC", totalCRC); } } container.queryTempHandler()->registerFile(fileName, container.queryOwner().queryGraphId(), diskHelperBase->getTempUsageCount(), TDXtemporary & diskHelperBase->getFlags(), getDiskOutputKind(diskHelperBase->getFlags()), &clusters); if (!dlfn.isExternal()) { bool temporary = 0 != (diskHelperBase->getFlags()&TDXtemporary); if (!temporary && (queryJob().querySlaves() < fileDesc->numParts())) { // create empty parts for a fileDesc being published that is larger than this clusters size32_t recordSize = 0; IOutputMetaData *diskRowMeta = diskHelperBase->queryDiskRecordSize()->querySerializedDiskMeta(); if (diskRowMeta->isFixedSize() && (TAKdiskwrite == container.getKind())) { recordSize = diskRowMeta->getMinRecordSize(); if (0 != (diskHelperBase->getFlags() & TDXgrouped)) recordSize += 1; } unsigned compMethod = COMPRESS_METHOD_LZW; // rowdiff used if recordSize > 0, else fallback to compMethod if (getOptBool(THOROPT_COMP_FORCELZW, false)) { recordSize = 0; // by default if fixed length (recordSize set), row diff compression is used. This forces compMethod. 
compMethod = COMPRESS_METHOD_LZW; } else if (getOptBool(THOROPT_COMP_FORCEFLZ, false)) compMethod = COMPRESS_METHOD_FASTLZ; else if (getOptBool(THOROPT_COMP_FORCELZ4, false)) compMethod = COMPRESS_METHOD_LZ4; bool blockCompressed; bool compressed = fileDesc->isCompressed(&blockCompressed); for (unsigned clusterIdx=0; clusterIdx<fileDesc->numClusters(); clusterIdx++) { StringBuffer clusterName; fileDesc->getClusterGroupName(clusterIdx, clusterName, &queryNamedGroupStore()); PROGLOG("Creating blank parts for file '%s', cluster '%s'", fileName.get(), clusterName.str()); unsigned p=0; while (p<fileDesc->numParts()) { if (p == targetOffset) p += queryJob().querySlaves(); IPartDescriptor *partDesc = fileDesc->queryPart(p); CDateTime createTime, modifiedTime; for (unsigned c=0; c<partDesc->numCopies(); c++) { RemoteFilename rfn; partDesc->getFilename(c, rfn); StringBuffer path; rfn.getPath(path); try { ensureDirectoryForFile(path.str()); OwnedIFile iFile = createIFile(path.str()); Owned<IFileIO> iFileIO; if (compressed) iFileIO.setown(createCompressedFileWriter(iFile, recordSize, false, true, NULL, compMethod)); else iFileIO.setown(iFile->open(IFOcreate)); dbgassertex(iFileIO.get()); iFileIO.clear(); // ensure copies have matching datestamps, as they would do normally (backupnode expects it) if (partDesc->numCopies() > 1) { if (0 == c) iFile->getTime(&createTime, &modifiedTime, NULL); else iFile->setTime(&createTime, &modifiedTime, NULL); } } catch (IException *e) { if (0 == c) throw; Owned<IThorException> e2 = MakeThorException(e); e->Release(); e2->setAction(tea_warning); queryJob().fireException(e2); } } partDesc->queryProperties().setPropInt64("@size", 0); p++; } clusterIdx++; } } queryThorFileManager().publish(container.queryJob(), fileName, *fileDesc, NULL); } }
static void _doReplicate(CActivityBase *activity, IPartDescriptor &partDesc, ICopyFileProgress *iProgress) { StringBuffer primaryName; getPartFilename(partDesc, 0, primaryName);; RemoteFilename rfn; IFileDescriptor &fileDesc = partDesc.queryOwner(); unsigned copies = partDesc.numCopies(); unsigned c=1; for (; c<copies; c++) { unsigned replicateCopy; unsigned clusterNum = partDesc.copyClusterNum(c, &replicateCopy); rfn.clear(); partDesc.getFilename(c, rfn); StringBuffer dstName; rfn.getPath(dstName); assertex(dstName.length()); if (replicateCopy>0 ) { try { queryThor().queryBackup().backup(dstName.str(), primaryName.str()); } catch (IException *e) { Owned<IThorException> re = MakeActivityWarning(activity, e, "Failed to create replicate file '%s'", dstName.str()); e->Release(); activity->fireException(re); } } else // another primary { ActPrintLog(activity, "Copying to primary %s", dstName.str()); StringBuffer tmpName(dstName.str()); tmpName.append(".tmp"); OwnedIFile tmpIFile = createIFile(tmpName.str()); OwnedIFile srcFile = createIFile(primaryName.str()); CFIPScope fipScope(tmpName.str()); try { try { ensureDirectoryForFile(dstName.str()); ::copyFile(tmpIFile, srcFile, 0x100000, iProgress); } catch (IException *e) { IThorException *re = MakeActivityException(activity, e, "Failed to copy to tmp file '%s' from source file '%s'", tmpIFile->queryFilename(), srcFile->queryFilename()); e->Release(); throw re; } try { OwnedIFile dstIFile = createIFile(dstName.str()); dstIFile->remove(); tmpIFile->rename(pathTail(dstName.str())); } catch (IException *e) { IThorException *re = ThorWrapException(e, "Failed to rename '%s' to '%s'", tmpName.str(), dstName.str()); e->Release(); throw re; } } catch (IException *) { try { tmpIFile->remove(); } catch (IException *e) { ActPrintLog(&activity->queryContainer(), e, NULL); e->Release(); } throw; } } } }
virtual void init() { CMasterActivity::init(); OwnedRoxieString indexFileName(helper->getIndexFileName()); Owned<IDistributedFile> dataFile; Owned<IDistributedFile> indexFile = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, 0 != (helper->getJoinFlags() & JFindexoptional), true); unsigned keyReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJKRR", 0); if (!keyReadWidth || keyReadWidth>container.queryJob().querySlaves()) keyReadWidth = container.queryJob().querySlaves(); initMb.clear(); initMb.append(indexFileName.get()); if (helper->diskAccessRequired()) numTags += 2; initMb.append(numTags); unsigned t=0; for (; t<numTags; t++) { tags[t] = container.queryJob().allocateMPTag(); initMb.append(tags[t]); } bool keyHasTlk = false; if (indexFile) { unsigned numParts = 0; localKey = indexFile->queryAttributes().getPropBool("@local"); if (container.queryLocalData() && !localKey) throw MakeActivityException(this, 0, "Keyed Join cannot be LOCAL unless supplied index is local"); checkFormatCrc(this, indexFile, helper->getIndexFormatCrc(), true); Owned<IFileDescriptor> indexFileDesc = indexFile->getFileDescriptor(); IDistributedSuperFile *superIndex = indexFile->querySuperFile(); unsigned superIndexWidth = 0; unsigned numSuperIndexSubs = 0; if (superIndex) { numSuperIndexSubs = superIndex->numSubFiles(true); bool first=true; // consistency check Owned<IDistributedFileIterator> iter = superIndex->getSubFileIterator(true); ForEach(*iter) { IDistributedFile &f = iter->query(); unsigned np = f.numParts()-1; IDistributedFilePart &part = f.queryPart(np); const char *kind = part.queryAttributes().queryProp("@kind"); bool hasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); // if last part not tlk, then deemed local (might be singlePartKey) if (first) { first = false; keyHasTlk = hasTlk; superIndexWidth = f.numParts(); if (keyHasTlk) --superIndexWidth; } else { if (hasTlk != keyHasTlk) throw MakeActivityException(this, 0, "Local/Single 
part keys cannot be mixed with distributed(tlk) keys in keyedjoin"); if (keyHasTlk && superIndexWidth != f.numParts()-1) throw MakeActivityException(this, 0, "Super sub keys of different width cannot be mixed with distributed(tlk) keys in keyedjoin"); } } if (keyHasTlk) numParts = superIndexWidth * numSuperIndexSubs; else numParts = superIndex->numParts(); } else { numParts = indexFile->numParts(); if (numParts) { const char *kind = indexFile->queryPart(indexFile->numParts()-1).queryAttributes().queryProp("@kind"); keyHasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); if (keyHasTlk) --numParts; } } if (numParts) { initMb.append(numParts); initMb.append(superIndexWidth); // 0 if not superIndex initMb.append((superIndex && superIndex->isInterleaved()) ? numSuperIndexSubs : 0); unsigned p=0; UnsignedArray parts; for (; p<numParts; p++) parts.append(p); indexFileDesc->serializeParts(initMb, parts); if (localKey) keyHasTlk = false; // not used initMb.append(keyHasTlk); if (keyHasTlk) { if (numSuperIndexSubs) initMb.append(numSuperIndexSubs); else initMb.append((unsigned)1); Owned<IDistributedFileIterator> iter; IDistributedFile *f; if (superIndex) { iter.setown(superIndex->getSubFileIterator(true)); f = &iter->query(); } else f = indexFile; loop { unsigned location; OwnedIFile iFile; StringBuffer filePath; Owned<IFileDescriptor> fileDesc = f->getFileDescriptor(); Owned<IPartDescriptor> tlkDesc = fileDesc->getPart(fileDesc->numParts()-1); if (!getBestFilePart(this, *tlkDesc, iFile, location, filePath)) throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", f->queryLogicalName()); OwnedIFileIO iFileIO = iFile->open(IFOread); assertex(iFileIO); size32_t tlkSz = (size32_t)iFileIO->size(); initMb.append(tlkSz); ::read(iFileIO, 0, tlkSz, initMb); if (!iter || !iter->next()) break; f = &iter->query(); } } if (helper->diskAccessRequired()) { OwnedRoxieString fetchFilename(helper->getFileName()); if (fetchFilename) { 
dataFile.setown(queryThorFileManager().lookup(container.queryJob(), fetchFilename, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true)); if (dataFile) { if (superIndex) throw MakeActivityException(this, 0, "Superkeys and full keyed joins are not supported"); Owned<IFileDescriptor> dataFileDesc = getConfiguredFileDescriptor(*dataFile); void *ekey; size32_t ekeylen; helper->getFileEncryptKey(ekeylen,ekey); bool encrypted = dataFileDesc->queryProperties().getPropBool("@encrypted"); if (0 != ekeylen) { memset(ekey,0,ekeylen); free(ekey); if (!encrypted) { Owned<IException> e = MakeActivityWarning(&container, TE_EncryptionMismatch, "Ignoring encryption key provided as file '%s' was not published as encrypted", helper->getFileName()); container.queryJob().fireException(e); } } else if (encrypted) throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", fetchFilename.get()); unsigned dataReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJDRR", 0); if (!dataReadWidth || dataReadWidth>container.queryJob().querySlaves()) dataReadWidth = container.queryJob().querySlaves(); Owned<IGroup> grp = container.queryJob().querySlaveGroup().subset((unsigned)0, dataReadWidth); dataFileMapping.setown(getFileSlaveMaps(dataFile->queryLogicalName(), *dataFileDesc, container.queryJob().queryUserDescriptor(), *grp, false, false, NULL)); dataFileMapping->serializeFileOffsetMap(offsetMapMb.clear()); } else indexFile.clear(); } } } else