virtual int getPasswordDaysRemaining() { assertex(false); return -1; }
void process() { ActPrintLog("GlobalMergeActivityMaster::process"); CMasterActivity::process(); IHThorMergeArg *helper = (IHThorMergeArg *)queryHelper(); Owned<IRowInterfaces> rowif = createRowInterfaces(helper->queryOutputMeta(),queryActivityId(),queryCodeContext()); CThorKeyArray sample(*this, rowif,helper->querySerialize(),helper->queryCompare(),helper->queryCompareKey(),helper->queryCompareRowKey()); unsigned n = container.queryJob().querySlaves(); mptag_t *replytags = new mptag_t[n]; mptag_t *intertags = new mptag_t[n]; unsigned i; for (i=0;i<n;i++) { replytags[i] = TAG_NULL; intertags[i] = TAG_NULL; } try { for (i=0;i<n;i++) { if (abortSoon) return; CMessageBuffer mb; #ifdef _TRACE ActPrintLog("Merge process, Receiving on tag %d",replyTag); #endif rank_t sender; if (!receiveMsg(mb, RANK_ALL, replyTag, &sender)||abortSoon) return; #ifdef _TRACE ActPrintLog("Merge process, Received sample from %d",sender); #endif sender--; assertex((unsigned)sender<n); assertex(replytags[(unsigned)sender]==TAG_NULL); deserializeMPtag(mb,replytags[(unsigned)sender]); deserializeMPtag(mb,intertags[(unsigned)sender]); sample.deserialize(mb,true); } ActPrintLog("GlobalMergeActivityMaster::process samples merged"); sample.createSortedPartition(n); ActPrintLog("GlobalMergeActivityMaster::process partition generated"); for (i=0;i<n;i++) { if (abortSoon) return; CMessageBuffer mb; mb.append(n); for (unsigned j = 0;j<n;j++) serializeMPtag(mb,intertags[j]); sample.serialize(mb); #ifdef _TRACE ActPrintLog("Merge process, Replying to node %d tag %d",i+1,replytags[i]); #endif if (!container.queryJob().queryJobComm().send(mb, (rank_t)i+1, replytags[i])) return; } } catch (IException *e) { delete [] replytags; delete [] intertags; ActPrintLog(e, "MERGE"); throw; } delete [] replytags; delete [] intertags; ActPrintLog("GlobalMergeActivityMaster::process exit"); }
virtual CDateTime & getPasswordExpiration(CDateTime& expirationDate){ assertex(false); return expirationDate; }
virtual bool setPasswordExpiration(CDateTime& expirationDate) { assertex(false);return true; }
virtual ISoapField* createField(const char *serviceName, IProperties *params, MapStrToBuf *attachments) { assertex(false); return NULL; }
StringBuffer& getXsdDefinition(IEspContext& ctx, const char *msgTypeName, StringBuffer &schema, wsdlIncludedTable &added, const char *xns="xsd", const char *wsns="tns", unsigned flags=1) { assertex(false); return schema; }
// form
virtual StringBuffer& getHtmlForm(IEspContext &ctx, CHttpRequest* req, const char *serv, const char *method, StringBuffer &form, bool includeFormTag, const char *prefix) { assertex(false); return form; }
virtual void process() override
{
    ActPrintLog("INDEXWRITE: Start");
    init();
    IRowStream *stream = inputStream;
    ThorDataLinkMetaInfo info;
    input->getMetaInfo(info);
    outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
    start();
    if (refactor)
    {
        assertex(isLocal);
        if (active)
        {
            unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
            assertex(0 == container.queryJob().querySlaves() % targetWidth);
            unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
            unsigned myPart = queryJobChannel().queryMyRank();

            IArrayOf<IRowStream> streams;
            streams.append(*LINK(stream));
            --partsPerNode;

            // Should this be merging 1,11,21,31 etc.
            unsigned p=0;
            unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
            for (; p<partsPerNode; p++)
            {
                streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
            }
            ICompare *icompare = helper->queryCompare();
            assertex(icompare);
            Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
            myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
            stream = myInputStream;
        }
        else // serve nodes, creating merged parts
            rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
    }
    processed = THORDATALINK_STARTED;

    // single part key support
    // has to serially pull all data from nodes 2-N
    // nodes 2-N, could/should start pushing some data (as it's supposed to be small) to cut down on serial nature.
    unsigned node = queryJobChannel().queryMyRank();
    if (singlePartKey)
    {
        if (1 == node)
        {
            try
            {
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                for (;;)
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    if (abortSoon)
                        return;
                    processRow(row);
                }

                unsigned node = 2;
                while (node <= container.queryJob().querySlaves())
                {
                    Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                    CMessageBuffer mb;
                    Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                    CThorStreamDeserializerSource rowSource;
                    rowSource.setStream(stream);
                    bool successSR;
                    for (;;)
                    {
                        {
                            BooleanOnOff tf(receivingTag2);
                            successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                        }
                        if (successSR)
                        {
                            if (rowSource.eos())
                                break;
                            Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                            do
                            {
                                RtlDynamicRowBuilder rowBuilder(allocator);
                                size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                processRow(fRow);
                            }
                            while (!rowSource.eos());
                        }
                    }
                    node++;
                }
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, true);
                throw;
            }
            close(*partDesc, partCrc, true);
            stop();
        }
        else
        {
            CMessageBuffer mb;
            CMemoryRowSerializer mbs(mb);
            Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
            for (;;)
            {
                BooleanOnOff tf(receivingTag2);
                if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                {
                    if (abortSoon)
                        break;
                    mb.clear();
                    do
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        serializer->serialize(mbs, (const byte *)row.get());
                    }
                    while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                    if (!queryJobChannel().queryJobComm().reply(mb))
                        throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                    if (0 == mb.length())
                        break;
                }
            }
        }
    }
    else
    {
        if (!refactor || active)
        {
            try
            {
                StringBuffer partFname;
                getPartFilename(*partDesc, 0, partFname);
                ActPrintLog("INDEXWRITE: process: handling fname : %s", partFname.str());
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                ActPrintLog("INDEXWRITE: write");

                BooleanOnOff tf(receiving);
                if (!refactor || !active)
                    receiving = false;
                do
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    processRow(row);
                }
                while (!abortSoon);
                ActPrintLog("INDEXWRITE: write level 0 complete");
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
                throw;
            }
            close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
            stop();

            ActPrintLog("INDEXWRITE: Wrote %" RCPF "d records", processed & THORDATALINK_COUNT_MASK);

            if (buildTlk)
            {
                ActPrintLog("INDEXWRITE: sending rows");
                NodeInfoArray tlkRows;

                CMessageBuffer msg;
                if (firstNode())
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        if (enableTlkPart0)
                            tlkRows.append(* new CNodeInfo(0, firstRow.get(), firstRowSize, totalCount));
                        tlkRows.append(* new CNodeInfo(1, lastRow.get(), lastRowSize, totalCount));
                    }
                }
                else
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        CNodeInfo row(queryJobChannel().queryMyRank(), lastRow.get(), lastRowSize, totalCount);
                        row.serialize(msg);
                    }
                    queryJobChannel().queryJobComm().send(msg, 1, mpTag);
                }

                if (firstNode())
                {
                    ActPrintLog("INDEXWRITE: Waiting on tlk to complete");

                    // JCSMORE if refactor==true, is rowsToReceive here right??
                    unsigned rowsToReceive = (refactor ? (tlkDesc->queryOwner().numParts()-1) : container.queryJob().querySlaves()) -1; // -1 'cos got my own in array already
                    ActPrintLog("INDEXWRITE: will wait for info from %d slaves before writing TLK", rowsToReceive);
                    while (rowsToReceive--)
                    {
                        msg.clear();
                        receiveMsg(msg, RANK_ALL, mpTag); // NH->JCS RANK_ALL_OTHER not supported for recv
                        if (abortSoon)
                            return;
                        if (msg.length())
                        {
                            CNodeInfo *ni = new CNodeInfo();
                            ni->deserialize(msg);
                            tlkRows.append(*ni);
                        }
                    }
                    tlkRows.sort(CNodeInfo::compare);

                    StringBuffer path;
                    getPartFilename(*tlkDesc, 0, path);
                    ActPrintLog("INDEXWRITE: creating toplevel key file : %s", path.str());
                    try
                    {
                        open(*tlkDesc, true, helper->queryDiskRecordSize()->isVariableSize());
                        if (tlkRows.length())
                        {
                            CNodeInfo &lastNode = tlkRows.item(tlkRows.length()-1);
                            memset(lastNode.value, 0xff, lastNode.size);
                        }
                        ForEachItemIn(idx, tlkRows)
                        {
                            CNodeInfo &info = tlkRows.item(idx);
                            builder->processKeyData((char *)info.value, info.pos, info.size);
                        }
                        close(*tlkDesc, tlkCrc, true);
                    }
                    catch (CATCHALL)
                    {
                        abortSoon = true;
                        close(*tlkDesc, tlkCrc, true);
                        removeFiles(*partDesc);
                        throw;
                    }
                }
            }
            else if (!isLocal && firstNode())
void process() { ActPrintLog("INDEXWRITE: Start"); init(); ThorDataLinkMetaInfo info; inputs.item(0)->getMetaInfo(info); outRowAllocator.setown(queryJob().getRowAllocator(helper->queryDiskRecordSize(), container.queryId())); if (refactor) { assertex(isLocal); input.setown(createDataLinkSmartBuffer(this, inputs.item(0), INDEXWRITE_SMART_BUFFER_SIZE, true, false, RCUNBOUND, this, false, &container.queryJob().queryIDiskUsage())); startInput(input); if (active) { unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0); assertex(0 == container.queryJob().querySlaves() % targetWidth); unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth; unsigned myPart = container.queryJob().queryMyRank(); IArrayOf<IRowStream> streams; streams.append(*LINK(input)); --partsPerNode; // Should this be merging 1,11,21,31 etc. unsigned p=0; unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1)); for (; p<partsPerNode; p++) { streams.append(*createRowStreamFromNode(*this, fromPart++, container.queryJob().queryJobComm(), mpTag, abortSoon)); } ICompare *icompare = helper->queryCompare(); assertex(icompare); Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter; input.setown(createRowStreamToDataLinkAdapter(inputs.item(0), createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter))); } else // serve nodes, creating merged parts rowServer.setown(createRowServer(this, input, container.queryJob().queryJobComm(), mpTag)); } else if (singlePartKey) { input.setown(createDataLinkSmartBuffer(this, inputs.item(0), INDEXWRITE_SMART_BUFFER_SIZE, true, false, RCUNBOUND, this, false, &container.queryJob().queryIDiskUsage())); startInput(input); } else { input.set(inputs.item(0)); startInput(input); } processed = THORDATALINK_STARTED; // single part key support // has to serially pull all data fron nodes 2-N // nodes 2-N, could/should start pushing some data (as it's supposed to be small) to cut down on serial nature. unsigned node = container.queryJob().queryMyRank(); if (singlePartKey) { if (1 == node) { try { open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize()); loop { OwnedConstThorRow row = input->ungroupedNextRow(); if (!row) break; if (abortSoon) return; processRow(row); } unsigned node = 2; while (node <= container.queryJob().querySlaves()) { Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input); CMessageBuffer mb; Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb); CThorStreamDeserializerSource rowSource; rowSource.setStream(stream); bool successSR; loop { { BooleanOnOff tf(receivingTag2); successSR = container.queryJob().queryJobComm().sendRecv(mb, node, mpTag2); } if (successSR) { if (rowSource.eos()) break; Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input); do { RtlDynamicRowBuilder rowBuilder(allocator); size32_t sz = deserializer->deserialize(rowBuilder, rowSource); OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz); processRow(fRow); } while (!rowSource.eos()); } } node++; } } catch (CATCHALL) { close(*partDesc, partCrc, true); throw; } close(*partDesc, partCrc, true); doStopInput(); } else { CMessageBuffer mb; CMemoryRowSerializer mbs(mb); Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input); loop { BooleanOnOff tf(receivingTag2); if (container.queryJob().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more.. 
{ if (abortSoon) break; mb.clear(); do { OwnedConstThorRow row = input->ungroupedNextRow(); if (!row) break; serializer->serialize(mbs, (const byte *)row.get()); } while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row if (!container.queryJob().queryJobComm().reply(mb)) throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node); if (0 == mb.length()) break; } } } }
virtual IError * mapError(IError * error)
{
    Owned<IError> mappedError = primary->mapError(error);
    assertex(mappedError == error); // should not expect any mapping below a compound.
    return mappedError.getClear();
}
void init(MemoryBuffer &data, MemoryBuffer &slaveData)
{
    isLocal = 0 != (TIWlocal & helper->getFlags());
    mpTag = container.queryJob().deserializeMPTag(data);
    mpTag2 = container.queryJob().deserializeMPTag(data);
    data.read(active);
    if (active)
    {
        data.read(logicalFilename);
        partDesc.setown(deserializePartFileDescriptor(data));
    }
    data.read(fileSize);
    data.read(singlePartKey);
    data.read(refactor);
    if (singlePartKey)
        buildTlk = false;
    else
    {
        data.read(buildTlk);
        if (firstNode())
        {
            if (buildTlk)
                tlkDesc.setown(deserializePartFileDescriptor(data));
            else if (!isLocal) // existing tlk then..
            {
                OwnedRoxieString diName(helper->getDistributeIndexName());
                assertex(diName.get());
                tlkDesc.setown(deserializePartFileDescriptor(data));
                unsigned c;
                data.read(c);
                while (c--)
                {
                    RemoteFilename rf;
                    rf.deserialize(data);
                    if (!existingTlkIFile)
                    {
                        Owned<IFile> iFile = createIFile(rf);
                        if (iFile->exists())
                            existingTlkIFile.set(iFile);
                    }
                }
                if (!existingTlkIFile)
                    throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", diName.get());
            }
        }
    }
    IOutputMetaData * diskSize = helper->queryDiskRecordSize();
    assertex(!(diskSize->getMetaFlags() & MDFneedserializedisk));
    if (diskSize->isVariableSize())
    {
        if (TIWmaxlength & helper->getFlags())
            maxDiskRecordSize = helper->getMaxKeySize();
        else
            maxDiskRecordSize = KEYBUILD_MAXLENGTH; // current default behaviour, could be improved in the future
    }
    else
        maxDiskRecordSize = diskSize->getFixedSize();
    reportOverflow = false;
}
CREcheck(bool &_busy) : busy(_busy) { assertex(!busy); busy = true; }
IPropertyTree & query() { assertex(iter); return iter->query(); }
bool SuperHashTable::matchesElement(const void *et, const void *searchET) const { assertex(!"SuperHashTable::matchesElement needs to be overridden"); return false; }
// field factory
virtual ISoapField* createField(const char* name) { assertex(false); return NULL; }
Decimal & Decimal::divide(const Decimal & other) { //NB: Round towards zero int lo1, hi1, lo2, hi2; clip(lo1, hi1); other.clip(lo2, hi2); int nd1 = hi1+1-lo1; int nd2 = hi2+1-lo2; int hi = (hi1-hi2)+zeroDigit; int iters = hi+1; if (hi < 0) { setZero(); return *this; } if (hi2 < lo2) { //Division by zero defined to return 0 instead of throw an exception setZero(); return *this; } lsb = 0; msb = hi >= maxDigits ? maxDigits-1 : hi; const byte spare = 2; byte temp[maxDigits*2 + 3]; unsigned numeratorDigits = hi + 1 + nd2; memset(temp, 0, numeratorDigits+spare); // ensure two zero in msb, and below lsb. Also 2 zeros for looking 2 bytes ahead.. byte * numerator = temp+spare; if (numeratorDigits > nd1) memcpy(numerator + numeratorDigits - 1 - nd1, digits+lo1, nd1); else memcpy(numerator, digits + hi1 + 1 - (numeratorDigits-1), numeratorDigits-1); unsigned divisor01 = other.digits[hi2] * 10; if (hi2 != lo2) divisor01 += other.digits[hi2-1]; //MORE: Terminate early for exact divide.. const byte * divisor = other.digits + lo2; for (int iter = iters; iter--; ) { //The following guess for q is never too small, may be 1 too large byte * curNumerator = numerator + iter; unsigned numerator012 = curNumerator[nd2] * 100 + curNumerator[nd2-1] * 10 + curNumerator[nd2-2]; unsigned q = numerator012 / divisor01; if (q == 10) q--; if (q) { unsigned carry = 0; for (int i = 0; i < nd2; i++) { int next = 90 + curNumerator[i] - divisor[i] * q - carry; div_t values = div(next, 10); carry = 9 - values.quot; curNumerator[i] = values.rem; } carry -= curNumerator[nd2]; if (carry) { q--; assertex(carry==1); carry = 0; for (int i = 0; i < nd2; i++) { byte next = curNumerator[i] + divisor[i] + carry; carry = 0; if (next >= 10) { next -= 10; carry = 1; } curNumerator[i] = next; } assertex(carry); } } if (iter < maxDigits) digits[iter] = q; } //MORE: This should really calculate the next digit, and conditionally round the least significant digit. negative ^= other.negative; return *this; }
void CWriteMasterBase::init()
{
    published = false;
    recordsProcessed = 0;
    bool mangle = 0 != (diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp));
    OwnedRoxieString helperFileName = diskHelperBase->getFileName();
    StringBuffer expandedFileName;
    queryThorFileManager().addScope(container.queryJob(), helperFileName, expandedFileName, mangle);
    fileName.set(expandedFileName);
    dlfn.set(fileName);
    if (diskHelperBase->getFlags() & TDWextend)
    {
        assertex(0 == (diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp)));
        Owned<IDistributedFile> file = queryThorFileManager().lookup(container.queryJob(), helperFileName, false, true);
        if (file.get())
        {
            fileDesc.setown(file->getFileDescriptor());
            queryThorFileManager().noteFileRead(container.queryJob(), file, true);
        }
    }
    if (dlfn.isExternal())
        mpTag = container.queryJob().allocateMPTag(); // used
    if (NULL == fileDesc.get())
    {
        bool overwriteok = 0 != (TDWoverwrite & diskHelperBase->getFlags());

        unsigned idx=0;
        while (true)
        {
            OwnedRoxieString cluster(diskHelperBase->getCluster(idx));
            if (!cluster)
                break;
            clusters.append(cluster);
            idx++;
        }
        IArrayOf<IGroup> groups;
        fillClusterArray(container.queryJob(), fileName, clusters, groups);
        fileDesc.setown(queryThorFileManager().create(container.queryJob(), fileName, clusters, groups, overwriteok, diskHelperBase->getFlags()));
        if (1 == groups.ordinality())
            targetOffset = getGroupOffset(groups.item(0), container.queryJob().querySlaveGroup());
        IPropertyTree &props = fileDesc->queryProperties();
        if (diskHelperBase->getFlags() & (TDWowned|TDXjobtemp|TDXtemporary))
            props.setPropBool("@owned", true);
        if (diskHelperBase->getFlags() & TDWresult)
            props.setPropBool("@result", true);
        const char *rececl = diskHelperBase->queryRecordECL();
        if (rececl && *rececl)
            props.setProp("ECL", rececl);
        bool blockCompressed = false;
        void *ekey;
        size32_t ekeylen;
        diskHelperBase->getEncryptKey(ekeylen, ekey);
        if (ekeylen)
        {
            memset(ekey, 0, ekeylen);
            free(ekey);
            props.setPropBool("@encrypted", true);
            blockCompressed = true;
        }
        else if (0 != (diskHelperBase->getFlags() & TDWnewcompress) || 0 != (diskHelperBase->getFlags() & TDXcompress))
            blockCompressed = true;
        if (blockCompressed)
            props.setPropBool("@blockCompressed", true);
        props.setProp("@kind", "flat");
        if (TAKdiskwrite == container.getKind() && (0 != (diskHelperBase->getFlags() & TDXtemporary)) && container.queryOwner().queryOwner() && (!container.queryOwner().isGlobal())) // I am in a child query
        {
            // do early, because this will be local act. and will not come back to master until end of owning graph.
            publish();
        }
    }
}