void CSteppedConjunctionOptimizer::finishCandidates() { rootActivity->resetEOF(); ForEachItemIn(i1, inputs) inputs.item(i1).setRestriction(NULL, 0); ForEachItemIn(i2, joins) joins.item(i2).stopRestrictedJoin(); ForEachItemIn(i3, pseudoInputs) pseudoInputs.item(i3).clearPending(); if (prevEqualityRow) inputAllocator->releaseRow(prevEqualityRow); prevEqualityRow = equalityRow; equalityRow = NULL; }
void CHThorNaryActivity::done() { ForEachItemIn(i, expandedInputs) expandedInputs.item(i)->done(); expandedInputs.kill(); CHThorMultiInputActivity::done(); }
// Move the contents of 'from' into this array, leaving 'from' empty.
// FIX: guard against self-transfer — the original cleared this array first,
// so transfer(*this) would destroy every element before copying anything.
void VarElemArray::transfer(VarElemArray &from)
{
    if (&from == this)
        return;             // self-transfer is a no-op; without this check all elements are lost
    clear();
    ForEachItemIn(i, from)
        appendLink(from.item(i));
    from.clear();
}
// Emit a one-line transaction summary (active request count, timings and any
// collected trace values) to the debug log.
// NOTE(review): the segment  "user="******"  below (and the stray '}' after it)
// appears corrupted/redacted — it looks as if credential scrubbing removed an
// if-block header and the user value. Recover the original text from version
// control before relying on this function.
virtual void flushTraceSummary()
{
    StringBuffer logstr;
    logstr.appendf("activeReqs=").append(m_active).append(';');
    logstr.append("user="******"exception@%dms=%d;", m_exceptionTime, m_exceptionCode); }
    StringBuffer value;
    value.append("total=").appendulong(m_processingTime).append("ms");
    // Detailed trace values are only emitted when an exception occurred or logging is verbose.
    if (m_hasException || (getEspLogLevel() > LogNormal))
    {
        m_traceValues.append(value.str());
        if (m_traceValues.length())
        {
            ForEachItemIn(idx, m_traceValues)
                logstr.append(m_traceValues.item(idx)).append(";");
            m_traceValues.kill();   // values are consumed once flushed
        }
    }
    else
    {
        logstr.appendf("%s;", value.str());
    }
    DBGLOG("TxSummary[%s]", logstr.str());
}
virtual void waitForStrands() { producerStopSem.signal(threads.ordinality()); ForEachItemIn(i, threads) threads.item(i).join(); threads.kill(); }
// Write the password count followed by each cached password entry.
void CachedPasswordProvider::serialize(MemoryBuffer & out)
{
    unsigned count = passwords.ordinality();
    out.append(count);
    ForEachItemIn(i, passwords)
        passwords.item(i).serialize(out);
}
virtual int processCMD() { Owned<IClientWsPackageProcess> packageProcessClient = createCmdClient(WsPackageProcess, *this); StringBuffer pkgInfo; pkgInfo.loadFile(optFileName); fprintf(stdout, "\n ... adding package map %s now\n\n", optFileName.sget()); Owned<IClientAddPackageRequest> request = packageProcessClient->createAddPackageRequest(); request->setActivate(optActivate); request->setInfo(pkgInfo); request->setTarget(optTarget); request->setPackageMap(optPackageMapId); request->setProcess(optProcess); request->setDaliIp(optDaliIP); request->setOverWrite(optOverWrite); request->setGlobalScope(optGlobalScope); request->setSourceProcess(optSourceProcess); Owned<IClientAddPackageResponse> resp = packageProcessClient->AddPackage(request); if (resp->getExceptions().ordinality()) outputMultiExceptions(resp->getExceptions()); StringArray ¬Found = resp->getFilesNotFound(); if (notFound.length()) { fputs("\nFiles defined in package but not found in DFS:\n", stderr); ForEachItemIn(i, notFound) fprintf(stderr, " %s\n", notFound.item(i)); fputs("\n", stderr); } return 0; }
//Wait for all active producers to complete - including calling stop on their inputs void waitForProducers() { producerStopSem.signal(numProducers); ForEachItemIn(i, threads) threads.item(i).join(); threads.kill(); }
//Stop producers that have already been started() virtual void stopActiveProducers() { producerStopSem.signal(numProducers); ForEachItemIn(i, threads) threads.item(i).join(); threads.kill(); }
unsigned getNumUniqueExpressions(const HqlExprArray & exprs) { TransformMutexBlock block; ExpressionStatsInfo info; ForEachItemIn(i, exprs) calcNumUniqueExpressions(&exprs.item(i),info); return info.count; }
void CCassandraLogAgent::createTable(const char *dbName, const char *tableName, StringArray& columnNames, StringArray& columnTypes, const char* keys) { StringBuffer fields; ForEachItemIn(i, columnNames) fields.appendf("%s %s,", columnNames.item(i), columnTypes.item(i)); VStringBuffer createTableSt("CREATE TABLE IF NOT EXISTS %s.%s (%s PRIMARY KEY (%s));", dbName, tableName, fields.str(), keys); executeSimpleStatement(createTableSt.str()); }
// Take linked copies of the supplied active tables, then analyse 'root',
// wrapping the walk in a scope push/pop when the root is a dataset.
void ScopeConsistencyChecker::checkConsistent(IHqlExpression * root, const HqlExprArray & _activeTables)
{
    ForEachItemIn(idx, _activeTables)
        activeTables.append(OLINK(_activeTables.item(idx)));
    bool scoped = root->isDataset();
    if (scoped)
        pushScope();
    analyse(root, 0);
    if (scoped)
        popScope();
}
void CSlavePartMapping::getParts(unsigned i, IArrayOf<IPartDescriptor> &parts) { if (local) i = 0; if (i>=maps.ordinality()) return; CSlaveMap &map = maps.item(i); ForEachItemIn(m, map) parts.append(*LINK(&map.item(m))); }
// Build the logical-view graph for the supplied expressions and attach it to
// the workunit under the name "Logical".
void LogicalGraphCreator::createLogicalGraph(HqlExprArray & exprs)
{
    graph.setown(createPTree("graph"));
    // beginSubGraph(NULL, false);
    ForEachItemIn(idx, exprs)
        createRootGraphActivity(&exprs.item(idx));
    // endSubGraph();
    wu->createGraph("Logical", NULL, GraphTypeEcl, graph.getClear(), 0);
}
// Query the package service for file mappings of a query and report, on stderr:
// files the query uses that are not defined in the package map, and the
// superfiles (with their subfiles) that are defined in it.
// NOTE(review): this snippet appears truncated in this view — the trailing
// "return"/closing brace of the function is missing; confirm against the
// original file before editing further.
virtual int processCMD()
{
    Owned<IClientWsPackageProcess> packageProcessClient = getWsPackageSoapService(optServer, optPort, optUsername, optPassword);
    Owned<IClientGetQueryFileMappingRequest> request = packageProcessClient->createGetQueryFileMappingRequest();
    request->setTarget(optTarget);
    request->setQueryName(optQueryId);
    request->setPMID(optPMID);
    request->setGlobalScope(optGlobalScope);
    Owned<IClientGetQueryFileMappingResponse> resp = packageProcessClient->GetQueryFileMapping(request);
    if (resp->getExceptions().ordinality()>0)
        outputMultiExceptions(resp->getExceptions());
    // Files used by the query but not covered by any package definition.
    StringArray &unmappedFiles = resp->getUnmappedFiles();
    if (!unmappedFiles.ordinality())
        fputs("No undefined files found.\n", stderr);
    else
    {
        fputs("Files not defined in PackageMap:\n", stderr);
        ForEachItemIn(i, unmappedFiles)
            fprintf(stderr, " %s\n", unmappedFiles.item(i));
    }
    // Superfiles that the package map does define, listed with their subfiles.
    IArrayOf<IConstSuperFile> &superFiles = resp->getSuperFiles();
    if (!superFiles.ordinality())
        fputs("\nNo matching SuperFiles found in PackageMap.\n", stderr);
    else
    {
        fputs("\nSuperFiles defined in PackageMap:\n", stderr);
        ForEachItemIn(i, superFiles)
        {
            IConstSuperFile &super = superFiles.item(i);
            fprintf(stderr, " %s\n", super.getName());
            StringArray &subfiles = super.getSubFiles();
            if (subfiles.ordinality()>0)
            {
                ForEachItemIn(sbi, subfiles)
                    fprintf(stderr, " > %s\n", subfiles.item(sbi));
            }
        }
    }
// Build the logical-view graph for the supplied expressions and store it in the
// workunit's "Logical" graph via the update-graph API.
void LogicalGraphCreator::createLogicalGraph(HqlExprArray & exprs)
{
    graph.setown(createPTree("graph"));
    // beginSubGraph(NULL, false);
    ForEachItemIn(idx, exprs)
        createRootGraphActivity(&exprs.item(idx));
    // endSubGraph();
    Owned<IWUGraph> wug = wu->updateGraph("Logical");
    wug->setXGMMLTree(graph.getClear());
    wug->setType(GraphTypeEcl);
}
// Serialize per-slave initialisation data: the file name, the subfile logical
// names (count first), then this slave's slice of the part map — or a null map
// marker when no mapping exists.
void CDiskReadMasterBase::serializeSlaveData(MemoryBuffer &dst, unsigned slave)
{
    IHThorDiskReadBaseArg *helper = (IHThorDiskReadBaseArg *) queryHelper();
    dst.append(helper->getFileName());
    unsigned numSubs = subfileLogicalFilenames.ordinality();
    dst.append(numSubs);
    // Loop body is simply skipped when there are no subfiles.
    ForEachItemIn(s, subfileLogicalFilenames)
        dst.append(subfileLogicalFilenames.item(s));
    if (mapping)
        mapping->serializeMap(slave, dst);
    else
        CSlavePartMapping::serializeNullMap(dst);
}
// Append the exceptions in 'e' to 's' as JSON, optionally wrapped in a named
// object and/or a named array. Returns 's' unchanged when 'e' is null.
static StringBuffer &appendJSONExceptions(StringBuffer &s, IMultiException *e, const char *objname="Exceptions", const char *arrayName = "Exception")
{
    if (!e)
        return s;
    bool wrapObject = objname && *objname;
    bool wrapArray = arrayName && *arrayName;
    if (wrapObject)
        appendJSONName(s, objname).append('{');
    if (wrapArray)
        appendJSONName(s, arrayName).append('[');
    ForEachItemIn(i, *e)
        appendJSONException(s, &e->item(i), NULL, NULL);
    if (wrapArray)
        s.append(']');
    if (wrapObject)
        s.append('}');
    return s;
}
void CWSESPControlEx::cleanSessions(bool allSessions, const char* _id, const char* _userID, const char* _fromIP) { StringBuffer searchPath; setSessionXPath(allSessions, _id, _userID, _fromIP, searchPath); Owned<IRemoteConnection> globalLock = querySDSConnectionForESPSession(RTM_LOCK_WRITE, SESSION_SDS_LOCK_TIMEOUT); Owned<IPropertyTreeIterator> iter = globalLock->queryRoot()->getElements("*"); ForEach(*iter) { IArrayOf<IPropertyTree> toRemove; Owned<IPropertyTreeIterator> iter1 = iter->query().getElements(searchPath.str()); ForEach(*iter1) toRemove.append(*LINK(&iter1->query())); ForEachItemIn(i, toRemove) iter->query().removeTree(&toRemove.item(i)); } }
// One-time setup before the conjunction optimizer starts processing: wrap each
// input in an OrderedInput, order them (priority, then optimizable, then
// non-optimizable), mark the joins as restricted, and build the lowest seek row.
void CSteppedConjunctionOptimizer::beforeProcessing()
{
    //NB: This function is only called once, after we have decided it is worth processing.
    assertex(!eof); // just check it isn't called more than once
    assertex(numInputs);
    bool hasDistance = (helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasdistance) != 0;
    for (unsigned i3 = 0; i3 < numInputs; i3++)
    {
        OrderedInput & next = *new OrderedInput(inputs.item(i3), i3, hasDistance);
        orderedInputs.append(next);
        if (next.canOptimizeOrder())
            numOptimizeInputs++;
    }
    //Sort so that inputs are ordered (priority-inputs, optimizable, non-optimizable)
    orderedInputs.sort(compareInitialInputOrder);

    //If only a single re-orderable input, treat it as unorderable.
    if (numOptimizeInputs == 1)
    {
        assertex(orderedInputs.item(numPriorityInputs).canOptimizeOrder());
        orderedInputs.item(numPriorityInputs).stopOptimizeOrder();
        numOptimizeInputs = 0;
    }
    // Optimizable inputs occupy the slots immediately after the priority inputs.
    maxOptimizeInput = numPriorityInputs + numOptimizeInputs;
    associateRemoteInputs(orderedInputs, numPriorityInputs);

    //MORE: If some inputs have known priority, and other remote inputs don't, then we could consider
    //      connecting the unknown inputs to the last known inputs.
    ForEachItemIn(i4, joins)
        joins.item(i4).markRestrictedJoin(numEqualFields);

    assertex(helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasclearlow); // Don't support (very) old workunits that don't define this..
    if (helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasclearlow)
    {
        // Build the row used as the initial (lowest) seek position.
        RtlDynamicRowBuilder rowBuilder(inputAllocator);
        size32_t size = helper.createLowInputRow(rowBuilder);
        lowestSeekRow = rowBuilder.finalizeRowClear(size);
    }
}
// Render the RunEclEx form: build request XML describing the binding options
// and known clusters, transform it to HTML via XSLT, and send the response.
int CEclDirectSoapBindingEx::sendRunEclExForm(IEspContext &context, CHttpRequest* request, CHttpResponse* response)
{
    StringBuffer xml;
    xml.append("<RunEclEx clientVersion='").append(context.getClientVersion()).append("'>");
    appendXMLTag(xml, "UseEclRepository", (supportRepository) ? "Yes" : "No");
    appendXMLTag(xml, "Redirect", (redirect) ? "Yes" : "No");
    // Results are inlined only when not redirecting.
    appendXMLTag(xml, "IncludeResults", (redirect) ? "No" : "Yes");
    ForEachItemIn(idx, clusters)
        appendXMLTag(xml, "Cluster", clusters.item(idx));
    xml.append("</RunEclEx>");

    StringBuffer xsltPath(getCFD());
    xsltPath.append("./smc_xslt/run_ecl.xslt");
    StringBuffer html;
    xsltTransform(xml.str(), xsltPath.str(), NULL, html);

    response->setContent(html.str());
    response->setContentType(HTTP_TYPE_TEXT_HTML_UTF8);
    response->send();
    return 0;
}
// Master-side keyed-join activity: register the progress counter labels (with
// extra disk counters when disk access is required) and initialise state.
CKeyedJoinMaster(CMasterGraphElement *info) : CMasterActivity(info)
{
    helper = (IHThorKeyedJoinArg *) queryHelper();
    const char * const coreLabels[] = { "seeks", "scans", "accepted", "postfiltered", "prefiltered" };
    for (unsigned c = 0; c < sizeof(coreLabels)/sizeof(coreLabels[0]); c++)
        progressLabels.append(coreLabels[c]);
    if (helper->diskAccessRequired())
    {
        const char * const diskLabels[] = { "diskSeeks", "diskAccepted", "diskRejected" };
        for (unsigned d = 0; d < sizeof(diskLabels)/sizeof(diskLabels[0]); d++)
            progressLabels.append(diskLabels[d]);
    }
    // One ProgressInfo slot per label.
    ForEachItemIn(lbl, progressLabels)
        progressInfoArr.append(*new ProgressInfo);
    localKey = false;
    numTags = 0;
    tags[0] = tags[1] = tags[2] = tags[3] = TAG_NULL;
    reInit = 0 != (helper->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
}
// Serialize the part map for slave 'i': a count placeholder, the part indexes,
// and optional per-part extra data gathered via the IGetSlaveData callback.
// Parts whose extra-data fetch fails are dropped from the serialized set.
// NOTE(review): this snippet appears truncated in this view — the code that
// back-patches the count at nPos and appends 'parts'/'extraMb' is missing;
// confirm against the original file before editing further.
void CSlavePartMapping::serializeMap(unsigned i, MemoryBuffer &mb, IGetSlaveData *extra)
{
    if (local)
        i = 0;
    if (i >= maps.ordinality())
    {
        mb.append((unsigned)0);     // no parts for an out-of-range slave
        return;
    }
    CSlaveMap &map = maps.item(i);
    unsigned nPos = mb.length();    // remember where the count lives so it can be rewritten later
    unsigned n=0;
    mb.append(n);
    UnsignedArray parts;
    ForEachItemIn(m, map)
        parts.append(map.item(m).queryPartIndex());
    MemoryBuffer extraMb;
    if (extra)
    {
        ForEachItemIn(m2, map)
        {
            // Reserve a length prefix, fetch the extra data, then back-patch the length.
            unsigned xtraLen = 0;
            unsigned xtraPos = extraMb.length();
            extraMb.append(xtraLen);
            IPartDescriptor &partDesc = map.item(m2);
            if (!extra->getData(m2, partDesc.queryPartIndex(), extraMb))
            {
                // Fetch failed: exclude this part and roll back the reserved prefix.
                parts.zap(partDesc.queryPartIndex());
                extraMb.rewrite(xtraPos);
            }
            else
            {
                xtraLen = (extraMb.length()-xtraPos)-sizeof(xtraLen);
                extraMb.writeDirect(xtraPos, sizeof(xtraLen), &xtraLen);
            }
        }
    }
// Master-side keyed-join activity (statistic-kind variant): register the
// progress statistic kinds (plus disk kinds when disk access is needed) and
// initialise member state.
CKeyedJoinMaster(CMasterGraphElement *info) : CMasterActivity(info)
{
    helper = (IHThorKeyedJoinArg *) queryHelper();
    //GH->JCS a bit wasteful creating this array each time.
    progressKinds.append(StNumIndexSeeks);
    progressKinds.append(StNumIndexScans);
    progressKinds.append(StNumIndexAccepted);
    progressKinds.append(StNumPostFiltered);
    progressKinds.append(StNumPreFiltered);
    if (helper->diskAccessRequired())
    {
        progressKinds.append(StNumDiskSeeks);
        progressKinds.append(StNumDiskAccepted);
        progressKinds.append(StNumDiskRejected);
    }
    // One ProgressInfo slot per statistic kind.
    ForEachItemIn(kind, progressKinds)
        progressInfoArr.append(*new ProgressInfo);
    remoteDataFiles = false;
    localKey = false;
    numTags = 0;
    tags[0] = tags[1] = tags[2] = tags[3] = TAG_NULL;
    reInit = 0 != (helper->getFetchFlags() & (FFvarfilename|FFdynamicfilename));
}
// Log a header followed by each partition point at detail level.
void displayPartition(PartitionPointArray & partition)
{
    LOG(MCdebugInfoDetail, unknownJob, "Partition:");
    ForEachItemIn(p, partition)
        partition.item(p).display();
}
// One-time setup before the conjunction optimizer starts processing: wrap each
// input in an OrderedInput, order them (priority, then optimizable, then
// non-optimizable), wire up read-ahead and multi-seek wrappers for remote
// priority inputs, mark the joins as restricted, and build the lowest seek row.
void CSteppedConjunctionOptimizer::beforeProcessing()
{
    //NB: This function is only called once, after we have decided it is worth processing.
    assertex(!eof); // just check it isn't called more than once
    assertex(numInputs);
    bool hasDistance = (helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasdistance) != 0;
    for (unsigned i3 = 0; i3 < numInputs; i3++)
    {
        OrderedInput & next = *new OrderedInput(inputs.item(i3), i3, hasDistance);
        orderedInputs.append(next);
        if (next.canOptimizeOrder())
            numOptimizeInputs++;
    }
    //Sort so that inputs are ordered (priority-inputs, optimizable, non-optimizable)
    orderedInputs.sort(compareInitialInputOrder);

    //If only a single re-orderable input, treat it as unorderable.
    if (numOptimizeInputs == 1)
    {
        assertex(orderedInputs.item(numPriorityInputs).canOptimizeOrder());
        orderedInputs.item(numPriorityInputs).stopOptimizeOrder();
        numOptimizeInputs = 0;
    }
    // Optimizable inputs occupy the slots immediately after the priority inputs.
    maxOptimizeInput = numPriorityInputs + numOptimizeInputs;

    //If we know for sure the primary input, then tag it as worth reading ahead - otherwise it will be dynamically set later.
    if (numPriorityInputs > 0)
    {
        orderedInputs.item(0).setReadAhead(true);
        orderedInputs.item(0).setAlwaysReadExact();
    }
    //Work out the last input of known priority which is read remotely.
    unsigned maxPriorityRemote = numPriorityInputs;
    while ((maxPriorityRemote >= 2) && !orderedInputs.item(maxPriorityRemote-1).readsRowsRemotely())
        maxPriorityRemote--;
    //If the second ordered input is known to be read remotely, then we want to send multiple seek requests at the same time.
    //MORE: Maybe we should consider doing this to all other inputs if only one priority input is known.
    if (maxPriorityRemote >= 2)
    {
        // Chain each remote priority input to a multi-read wrapper on its predecessor.
        for (unsigned i=1; i < maxPriorityRemote; i++)
        {
            IMultipleStepSeekInfo * seekInfo = orderedInputs.item(i-1).createMutipleReadWrapper();
            orderedInputs.item(i).createMultipleSeekWrapper(seekInfo);
        }
    }
    //MORE: If some inputs have known priority, and other remote inputs don't, then we could consider
    //      connecting the unknown inputs to the last known inputs.
    ForEachItemIn(i4, joins)
        joins.item(i4).markRestrictedJoin(numEqualFields);

    assertex(helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasclearlow); // Don't support (very) old workunits that don't define this..
    if (helper.getJoinFlags() & IHThorNWayMergeJoinArg::MJFhasclearlow)
    {
        // Build the row used as the initial (lowest) seek position.
        RtlDynamicRowBuilder rowBuilder(inputAllocator);
        size32_t size = helper.createLowInputRow(rowBuilder);
        lowestSeekRow = rowBuilder.finalizeRowClear(size);
    }
}
// Log a header followed by a trace line for each progress entry.
void displayProgress(OutputProgressArray & progress)
{
    LOG(MCdebugInfoDetail, unknownJob, "Progress:");
    ForEachItemIn(p, progress)
        progress.item(p).trace();
}
// Deserialize a push/pull transfer command from the master. The read order here
// IS the wire format — it must match the serializing side exactly, and the
// trailing msg.remaining() checks give backward compatibility with older
// masters that did not send the later fields.
void TransferServer::deserializeAction(MemoryBuffer & msg, unsigned action)
{
    // Sanity check: the command must have been addressed to this machine.
    SocketEndpoint ep;
    ep.deserialize(msg);
    if (!ep.isLocal())
    {
        StringBuffer host, expected;
        queryHostIP().getIpText(host);
        ep.getIpText(expected);
        throwError2(DFTERR_WrongComputer, expected.str(), host.str());
    }

    srcFormat.deserialize(msg);
    tgtFormat.deserialize(msg);
    msg.read(calcInputCRC);
    msg.read(calcOutputCRC);
    deserialize(partition, msg);
    msg.read(numParallelSlaves);
    msg.read(updateFrequency);
    msg.read(replicate);
    msg.read(mirror);
    msg.read(isSafeMode);

    // Randomly skew this slave's progress-update tick so slaves don't all
    // report at the same moment.
    srand((unsigned)get_cycles_now());
    int adjust = (rand() * rand() * rand()) % updateFrequency - (updateFrequency/2);
    lastTick = msTick() + adjust;

    StringBuffer localFilename;
    if (action == FTactionpull)
    {
        partition.item(0).outputName.getPath(localFilename);
        LOG(MCdebugProgress, unknownJob, "Process Pull Command: %s", localFilename.str());
    }
    else
    {
        partition.item(0).inputName.getPath(localFilename);
        LOG(MCdebugProgress, unknownJob, "Process Push Command: %s", localFilename.str());
    }
    LOG(MCdebugProgress, unknownJob, "Num Parallel Slaves=%d Adjust=%d/%d", numParallelSlaves, adjust, updateFrequency);
    LOG(MCdebugProgress, unknownJob, "replicate(%d) mirror(%d) safe(%d) incrc(%d) outcrc(%d)", replicate, mirror, isSafeMode, calcInputCRC, calcOutputCRC);
    displayPartition(partition);

    // Resume information: one OutputProgress record per previously started part.
    unsigned numProgress;
    msg.read(numProgress);
    for (unsigned i = 0; i < numProgress; i++)
    {
        OutputProgress & next = *new OutputProgress;
        next.deserializeCore(msg);
        progress.append(next);
    }

    // Optional fields appended by successive protocol versions — each guarded
    // so that shorter (older) messages still deserialize cleanly.
    if (msg.remaining())
        msg.read(throttleNicSpeed);
    if (msg.remaining())
        msg.read(compressedInput).read(compressOutput);
    if (msg.remaining())
        msg.read(copyCompressed);
    if (msg.remaining())
        msg.read(transferBufferSize);
    if (msg.remaining())
        msg.read(encryptKey).read(decryptKey);
    if (msg.remaining())
    {
        srcFormat.deserializeExtra(msg, 1);
        tgtFormat.deserializeExtra(msg, 1);
    }
    ForEachItemIn(i1, progress)
        progress.item(i1).deserializeExtra(msg, 1);

    LOG(MCdebugProgress, unknownJob, "throttle(%d), transferBufferSize(%d)", throttleNicSpeed, transferBufferSize);
    PROGLOG("compressedInput(%d), compressedOutput(%d), copyCompressed(%d)", compressedInput?1:0, compressOutput?1:0, copyCompressed?1:0);
    PROGLOG("encrypt(%d), decrypt(%d)", encryptKey.isEmpty()?0:1, decryptKey.isEmpty()?0:1);

    //---Finished deserializing ---
    displayProgress(progress);

    // Total bytes this transfer will read, summed over all partition points.
    totalLengthRead = 0;
    totalLengthToRead = 0;
    ForEachItemIn(idx, partition)
        totalLengthToRead += partition.item(idx).inputLength;
}
// Write the partition-point count followed by each serialized partition point.
void serialize(PartitionPointArray & partition, MemoryBuffer & out)
{
    unsigned total = partition.ordinality();
    out.append(total);
    ForEachItemIn(part, partition)
        partition.item(part).serialize(out);
}
virtual int processCMD() { Owned<IClientWsPackageProcess> packageProcessClient = getWsPackageSoapService(optServer, optPort, optUsername, optPassword); Owned<IClientValidatePackageRequest> request = packageProcessClient->createValidatePackageRequest(); if (optFileName.length()) { StringBuffer pkgInfo; pkgInfo.loadFile(optFileName); fprintf(stdout, "\nvalidating packagemap file %s\n\n", optFileName.sget()); request->setInfo(pkgInfo); } request->setActive(optValidateActive); request->setPMID(optPMID); request->setTarget(optTarget); request->setQueryIdToVerify(optQueryId); request->setCheckDFS(optCheckDFS); request->setGlobalScope(optGlobalScope); bool validateMessages = false; Owned<IClientValidatePackageResponse> resp = packageProcessClient->ValidatePackage(request); if (resp->getExceptions().ordinality()>0) { validateMessages = true; outputMultiExceptions(resp->getExceptions()); } StringArray &errors = resp->getErrors(); if (errors.ordinality()>0) { validateMessages = true; fputs(" Error(s):\n", stderr); ForEachItemIn(i, errors) fprintf(stderr, " %s\n", errors.item(i)); } StringArray &warnings = resp->getWarnings(); if (warnings.ordinality()>0) { validateMessages = true; fputs(" Warning(s):\n", stderr); ForEachItemIn(i, warnings) fprintf(stderr, " %s\n", warnings.item(i)); } StringArray &unmatchedQueries = resp->getQueries().getUnmatched(); if (unmatchedQueries.ordinality()>0) { validateMessages = true; fputs("\n Queries without matching package:\n", stderr); ForEachItemIn(i, unmatchedQueries) fprintf(stderr, " %s\n", unmatchedQueries.item(i)); } StringArray &unusedPackages = resp->getPackages().getUnmatched(); if (unusedPackages.ordinality()>0) { validateMessages = true; fputs("\n Packages without matching queries:\n", stderr); ForEachItemIn(i, unusedPackages) fprintf(stderr, " %s\n", unusedPackages.item(i)); } StringArray &unusedFiles = resp->getFiles().getUnmatched(); if (unusedFiles.ordinality()>0) { fputs("\n Files without matching package definitions:\n", 
stderr); ForEachItemIn(i, unusedFiles) fprintf(stderr, " %s\n", unusedFiles.item(i)); } StringArray ¬InDFS = resp->getFiles().getNotInDFS(); if (notInDFS.ordinality()>0) { fputs("\n Packagemap SubFiles not found in DFS:\n", stderr); ForEachItemIn(i, notInDFS) fprintf(stderr, " %s\n", notInDFS.item(i)); } if (!validateMessages) fputs(" Validation was successful\n", stdout); return 0; }