template <class C>
inline void Linked<C>::insert_after(Linked<C>& x)
{
    // resulting order: *p -> *this -> x -> *n
    x.set_prev(*this);
    x.set_next(this->next());
    next().set_prev(x);
    set_next(x);
}
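// The splice above relies on the circular doubly linked discipline of Linked<C>.
// A minimal self-contained sketch of the same four-step splice, using a
// hypothetical RingNode type (not the library's actual Linked<C>), for illustration:
#include <cassert>

struct RingNode
{
    RingNode *prevp, *nextp;
    int value;
    explicit RingNode(int v) : prevp(this), nextp(this), value(v) {} // a ring of one, self-linked
    RingNode &prev() { return *prevp; }
    RingNode &next() { return *nextp; }
    void set_prev(RingNode &n) { prevp = &n; }
    void set_next(RingNode &n) { nextp = &n; }
    void insert_after(RingNode &x)
    {
        x.set_prev(*this);   // x points back at us
        x.set_next(next());  // x points forward at our old successor
        next().set_prev(x);  // old successor points back at x
        set_next(x);         // we point forward at x
    }
};

int main()
{
    RingNode a(1), b(2), c(3);
    a.insert_after(b); // ring: a <-> b
    a.insert_after(c); // ring: a <-> c <-> b
    assert(&a.next() == &c && &c.next() == &b && &b.next() == &a);
    return 0;
}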
ILoadedDllEntry *SafePluginMap::getPluginDll(const char *id, const char *version, bool checkVersion)
{
    CriticalBlock b(crit);
    Linked<PluginDll> ret = static_cast<PluginDll *>(map.getValue(id));
    if (ret && checkVersion)
    {
        if (!ret->checkVersion(version))
            return NULL;
    }
    return ret.getLink();
}
virtual void flush()
{
    if (!flushdone)
    {
        out->flush();
        offset_t end = out->getPosition();
        writeidxofs(end);
        if (outidx)
            outidx->flush();
        flushdone = true;
    }
}
void convertExisting()
{
    Linked<IPropertyTree> pmPart = pmExisting;
    const char *s = strstr(pmid.str(), "::");
    if (s)
        pmPart->addProp("@id", s+2);
    packageMaps->removeTree(pmExisting);
    Owned<IPropertyTree> pmTree = createPTree("PackageMap", ipt_ordered);
    pmTree->setProp("@id", pmid);
    pmTree->setPropBool("@multipart", true);
    pmTree->addPropTree("Part", pmPart.getClear());
    pmExisting = packageMaps->addPropTree("PackageMap", pmTree.getClear());
}
virtual void notify(SubscriptionId subid, const char *daliXpath, SDSNotifyFlags flags, unsigned valueLen, const void *valueData)
{
    // Ensure that I am not released by the notify call (which would then access freed memory to release the critsec)
    Linked<CDaliPackageWatcher> me = this;
    Linked<ISDSSubscription> myNotifier;
    {
        CriticalBlock b(crit);
        if (traceLevel > 5)
            DBGLOG("Notification on %s (%s), %p", xpath.get(), daliXpath ? daliXpath : "", this);
        myNotifier.set(notifier);
        // crit can now be released, allowing this to be unsubscribed; this avoids
        // deadlock when other threads unsubscribe from within a notify call
    }
    if (myNotifier)
        myNotifier->notify(subid, daliXpath, flags, valueLen, valueData);
}
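// The two Linked<> locals above implement a common lifetime pattern: pin "this"
// and snapshot the callback target while holding the lock, then invoke outside
// the lock. A rough standard-library analogue (hypothetical Watcher/Notifier
// names; assumes the owner is itself managed by shared_ptr):
#include <memory>
#include <mutex>

struct Notifier
{
    void notify() { /* user callback; may call back into unsubscribe */ }
};

struct Watcher : std::enable_shared_from_this<Watcher>
{
    std::mutex crit;
    std::shared_ptr<Notifier> notifier;

    void onEvent()
    {
        std::shared_ptr<Watcher> me = shared_from_this(); // keep *this alive for the whole call
        std::shared_ptr<Notifier> myNotifier;
        {
            std::lock_guard<std::mutex> b(crit);
            myNotifier = notifier; // snapshot under the lock
        } // lock released here, so a concurrent unsubscribe/reset cannot deadlock against us
        if (myNotifier)
            myNotifier->notify(); // safe even if 'notifier' was cleared concurrently
    }
};

int main()
{
    auto w = std::make_shared<Watcher>();
    w->notifier = std::make_shared<Notifier>();
    w->onEvent();
    return 0;
}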
#include <iostream>
#include <cstdlib>
#include "linked.h" // assumed project header providing String, Linked<T> and PrintLink

int main(int argc, char *argv[])
{
    String read = "run -t -includelist linkdemo";
    Linked<String> tokenlist;
    Linked<String> optionlist;
    char s[] = "hello world";
    String test = s;
    std::cout << test;
    char str[50];
    int i = 0;
    std::cout << read.length() << std::endl;
    while (read[i] != '\0')
    {
        int j = 0;
        if (read[i] != '-')
        {
            while (read[i] != ' ' && read[i] != '\0')
                str[j++] = read[i++];
            str[j] = '\0';
            String temp = str;
            tokenlist.InsertAfter(temp);
        }
        else
        {
            while (read[i] != ' ' && read[i] != '\0')
                str[j++] = read[i++];
            str[j] = '\0';
            String temp = str;
            //std::cout << str << std::endl;
            optionlist.InsertAfter(temp);
            //PrintLink(optionlist);
        }
        std::cout << "|" << str << "|" << std::endl;
        if (read[i] != '\0')
            i++;
    }
    PrintLink(tokenlist);
    //PrintLink(optionlist);
    /*Linked<String> l;
    String stre = "hell world";
    l.InsertAfter(stre);
    PrintLink(l);*/
    std::system("pause");
    return 0;
}
void init()
{
    StringBuffer xpath("Software/ThorCluster[@name=\"");
    xpath.append(clusterName).append("\"]");
    Owned<IRemoteConnection> conn = querySDS().connect("/Environment", myProcessSession(), RTM_LOCK_READ, SDS_LOCK_TIMEOUT);
    environment.setown(createPTreeFromIPT(conn->queryRoot()));
    options = environment->queryPropTree(xpath.str());
    if (!options)
        throwUnexpected();
    groupName.set(options->queryProp("@nodeGroup"));
    if (groupName.isEmpty())
        groupName.set(options->queryProp("@name"));
    VStringBuffer spareS("%s_spares", groupName.get());
    spareGroupName.set(spareS);
    group.setown(queryNamedGroupStore().lookup(groupName));
    spareGroup.setown(queryNamedGroupStore().lookup(spareGroupName));
}
void putRow(const void *_row)
{
    offset_t start = out->getPosition();
    OwnedConstThorRow row = _row;
    out->putRow(row.getLink());
    idx++;
    if (idx == interval)
    {
        idx = 0;
        if (overflowed || rowArray.isFull())
        {
            overflowsize = out->getPosition();
            if (!overflowed)
            {
                PROGLOG("Sample buffer full");
                overflowed = true;
            }
        }
        else
            rowArray.append(row.getClear());
    }
    writeidxofs(start);
}
void writeidxofs(offset_t o)
{
    // lazy index write
    if (outidx.get())
    {
        outidx->write(sizeof(o), &o);
        return;
    }
    if (lastofs)
    {
        if (fixedsize != o - lastofs)
        {
            // row sizes differ, so create the index now
            StringBuffer tempname;
            GetTempName(tempname.clear(), "srtidx", false);
            outidxfile.setown(createIFile(tempname.str()));
            Owned<IFileIO> fileioidx = outidxfile->open(IFOcreate);
            outidx.setown(fileioidx ? createBufferedIOStream(fileioidx, 0x100000) : NULL);
            if (outidx.get() == NULL)
            {
                StringBuffer err;
                err.append("Cannot create ").append(outidxfile->queryFilename());
                LOG(MCerror, thorJob, "%s", err.str());
                throw MakeStringException(-1, "%s", err.str());
            }
            // backfill the offsets implied by the fixed stride so far
            offset_t s = 0;
            while (s <= lastofs)
            {
                outidx->write(sizeof(s), &s);
                s += fixedsize;
            }
            assertex(s == lastofs + fixedsize);
            fixedsize = 0;
            writeidxofs(o);
            return;
        }
    }
    else
        fixedsize = (size32_t)(o - lastofs);
    lastofs = o;
}
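// Worked illustration of the lazy-index idea above, simplified and in-memory
// (the real code writes through a buffered IFileIO; the names here are invented).
// Offsets stay implicit as a fixed stride until a row of a different size
// arrives, at which point the index is materialised by backfilling.
#include <cassert>
#include <cstdint>
#include <vector>

struct LazyIndex
{
    std::vector<uint64_t> idx; // stands in for outidx; empty == index not yet created
    uint64_t lastofs = 0;
    uint64_t fixedsize = 0;

    void writeidxofs(uint64_t o)
    {
        if (!idx.empty()) // index exists: just append
        {
            idx.push_back(o);
            return;
        }
        if (lastofs)
        {
            if (fixedsize != o - lastofs) // stride broken: create and backfill the index
            {
                for (uint64_t s = 0; s <= lastofs; s += fixedsize)
                    idx.push_back(s); // offsets 0, fixedsize, ..., lastofs
                fixedsize = 0;
                idx.push_back(o); // the original recurses to perform this append
                lastofs = o;
                return;
            }
        }
        else
            fixedsize = o - lastofs; // first non-zero offset defines the candidate stride
        lastofs = o;
    }
};

int main()
{
    LazyIndex ix;
    ix.writeidxofs(0);   // first row starts at 0 (lastofs stays 0, which reads as "unset")
    ix.writeidxofs(100); // candidate stride becomes 100
    ix.writeidxofs(200); // still fixed-size: no index written yet
    assert(ix.idx.empty());
    ix.writeidxofs(350); // a 150-byte row: index created and backfilled
    assert((ix.idx == std::vector<uint64_t>{0, 100, 200, 350}));
    return 0;
}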
#include <iostream>
#include "linked.h" // assumed project header providing this (non-template) Linked list

int main()
{
    Linked L;
    L.append(1);
    L.append(3);
    L.append(5);
    L.append(3);
    L.append(5);
    L.append(11);
    L.displayList();
    L.remDups();
    std::cout << "duplicates removed\n";
    std::cout << L;
    return 0;
}
virtual offset_t getPosition() { return out->getPosition(); }
void prepareKey()
{
    IDistributedFile *f = index.get();
    IDistributedSuperFile *super = f->querySuperFile();
    unsigned nparts = f->numParts(); // includes tlks if any, but unused in array
    performPartLookup.ensure(nparts);
    bool checkTLKConsistency = NULL != super && 0 != (TIRsorted & indexBaseHelper->getFlags());
    if (nofilter)
    {
        while (nparts--)
            performPartLookup.append(true);
        if (!checkTLKConsistency)
            return;
    }
    else
    {
        while (nparts--)
            performPartLookup.append(false); // parts to perform lookup set later
    }
    Owned<IDistributedFileIterator> iter;
    if (super)
    {
        iter.setown(super->getSubFileIterator(true));
        verifyex(iter->first());
        f = &iter->query();
    }
    unsigned width = f->numParts()-1;
    assertex(width);
    unsigned tlkCrc;
    bool first = true;
    unsigned superSubIndex = 0;
    bool fileCrc = false, rowCrc = false;
    loop
    {
        Owned<IDistributedFilePart> part = f->getPart(width);
        if (checkTLKConsistency)
        {
            unsigned _tlkCrc;
            if (part->getCrc(_tlkCrc))
                fileCrc = true;
            else if (part->queryAttributes().hasProp("@crc")) // NB: key "@crc" is not a crc on the file, but data within.
            {
                _tlkCrc = part->queryAttributes().getPropInt("@crc");
                rowCrc = true;
            }
            else if (part->queryAttributes().hasProp("@tlkCrc")) // backward compat.
            {
                _tlkCrc = part->queryAttributes().getPropInt("@tlkCrc");
                rowCrc = true;
            }
            else
            {
                if (rowCrc || fileCrc)
                {
                    checkTLKConsistency = false;
                    Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, some crc attributes are missing", super->queryLogicalName());
                    container.queryJob().fireException(e);
                }
            }
            if (rowCrc && fileCrc)
            {
                checkTLKConsistency = false;
                Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, due to mixed crc types.", super->queryLogicalName());
                container.queryJob().fireException(e);
            }
            if (checkTLKConsistency)
            {
                if (first)
                {
                    tlkCrc = _tlkCrc;
                    first = false;
                }
                else if (tlkCrc != _tlkCrc)
                    throw MakeActivityException(this, 0, "Sorted output on super files comprising non co-partitioned sub keys is not supported (TLKs do not match)");
            }
        }
        if (!nofilter)
        {
            Owned<IKeyIndex> keyIndex;
            unsigned copy;
            for (copy=0; copy<part->numCopies(); copy++)
            {
                RemoteFilename rfn;
                OwnedIFile ifile = createIFile(part->getFilename(rfn, copy));
                if (ifile->exists())
                {
                    StringBuffer remotePath;
                    rfn.getRemotePath(remotePath);
                    unsigned crc;
                    part->getCrc(crc);
                    keyIndex.setown(createKeyIndex(remotePath.str(), crc, false, false));
                    break;
                }
            }
            if (!keyIndex)
                throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", indexBaseHelper->getFileName());
            unsigned maxSize = indexBaseHelper->queryDiskRecordSize()->querySerializedMeta()->getRecordSize(NULL); // used only if fixed
            Owned<IKeyManager> tlk = createKeyManager(keyIndex, maxSize, NULL);
            indexBaseHelper->createSegmentMonitors(tlk);
            tlk->finishSegmentMonitors();
            tlk->reset();
            while (tlk->lookup(false))
            {
                if (tlk->queryFpos())
                    performPartLookup.replace(true, (aindex_t)(super ? super->numSubFiles(true)*(tlk->queryFpos()-1)+superSubIndex : tlk->queryFpos()-1));
            }
        }
        if (!super || !iter->next())
            break;
        superSubIndex++;
        f = &iter->query();
        if (width != f->numParts()-1)
            throw MakeActivityException(this, 0, "Super key %s with a mixture of sub key widths is not supported.", f->queryLogicalName());
    }
}
template <class C>
inline void Linked<C>::insert_before(Linked<C>& x)
{
    x.set_prev(prev());
    x.set_next(*this); // "this" points at the ROOT instance
    prev().set_next(x);
    set_prev(x);
}
CDigitalSignatureManager(CLoadedKey *_pubKey, CLoadedKey *_privKey) : pubKey(_pubKey), privKey(_privKey)
{
    signingConfigured = nullptr != privKey.get();
    verifyingConfigured = nullptr != pubKey.get();
}
virtual void process() override
{
    ActPrintLog("INDEXWRITE: Start");
    init();
    IRowStream *stream = inputStream;
    ThorDataLinkMetaInfo info;
    input->getMetaInfo(info);
    outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
    start();
    if (refactor)
    {
        assertex(isLocal);
        if (active)
        {
            unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
            assertex(0 == container.queryJob().querySlaves() % targetWidth);
            unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
            unsigned myPart = queryJobChannel().queryMyRank();

            IArrayOf<IRowStream> streams;
            streams.append(*LINK(stream));
            --partsPerNode;

            // Should this be merging 1,11,21,31 etc.
            unsigned p=0;
            unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
            for (; p<partsPerNode; p++)
            {
                streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
            }
            ICompare *icompare = helper->queryCompare();
            assertex(icompare);
            Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
            myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
            stream = myInputStream;
        }
        else // serve nodes, creating merged parts
            rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
    }
    processed = THORDATALINK_STARTED;

    // single part key support
    // has to serially pull all data from nodes 2-N
    // nodes 2-N could/should start pushing some data (as it's supposed to be small) to cut down on the serial nature.
    unsigned node = queryJobChannel().queryMyRank();
    if (singlePartKey)
    {
        if (1 == node)
        {
            try
            {
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                loop
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    if (abortSoon)
                        return;
                    processRow(row);
                }
                unsigned node = 2;
                while (node <= container.queryJob().querySlaves())
                {
                    Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                    CMessageBuffer mb;
                    Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                    CThorStreamDeserializerSource rowSource;
                    rowSource.setStream(stream);
                    bool successSR;
                    loop
                    {
                        {
                            BooleanOnOff tf(receivingTag2);
                            successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                        }
                        if (successSR)
                        {
                            if (rowSource.eos())
                                break;
                            Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                            do
                            {
                                RtlDynamicRowBuilder rowBuilder(allocator);
                                size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                processRow(fRow);
                            }
                            while (!rowSource.eos());
                        }
                    }
                    node++;
                }
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, true);
                throw;
            }
            close(*partDesc, partCrc, true);
            doStopInput();
        }
        else
        {
            CMessageBuffer mb;
            CMemoryRowSerializer mbs(mb);
            Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
            loop
            {
                BooleanOnOff tf(receivingTag2);
                if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                {
                    if (abortSoon)
                        break;
                    mb.clear();
                    do
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        serializer->serialize(mbs, (const byte *)row.get());
                    }
                    while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                    if (!queryJobChannel().queryJobComm().reply(mb))
                        throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                    if (0 == mb.length())
                        break;
                }
            }
        }
    }
virtual const char * queryKeyName() const override { return pubKey->queryKeyName(); }
void processMessage(CMessageBuffer &mb)
{
    ICoven &coven = queryCoven();
    MemoryBuffer params;
    params.swapWith(mb);
    int fn;
    params.read(fn);
    switch (fn)
    {
    case MDR_GET_VALUE:
        {
            StringAttr id;
            StringBuffer buf;
            params.read(id);
            if (0 == stricmp(id, "threads"))
                mb.append(getThreadList(buf).str());
            else if (0 == stricmp(id, "mpqueue"))
                mb.append(getReceiveQueueDetails(buf).str());
            else if (0 == stricmp(id, "locks"))
                mb.append(querySDS().getLocks(buf).str());
            else if (0 == stricmp(id, "sdsstats"))
                mb.append(querySDS().getUsageStats(buf).str());
            else if (0 == stricmp(id, "connections"))
                mb.append(querySDS().getConnections(buf).str());
            else if (0 == stricmp(id, "sdssubscribers"))
                mb.append(querySDS().getSubscribers(buf).str());
            else if (0 == stricmp(id, "clients"))
                mb.append(querySessionManager().getClientProcessList(buf).str());
            else if (0 == stricmp(id, "subscriptions"))
                mb.append(getSubscriptionList(buf).str());
            else if (0 == stricmp(id, "mpverify"))
            {
                queryWorldCommunicator().verifyAll(buf);
                mb.append(buf.str());
            }
            else if (0 == stricmp(id, "extconsistency"))
                mb.append(querySDS().getExternalReport(buf).str());
            else if (0 == stricmp(id, "build"))
                mb.append("$Id: dadiags.cpp 62376 2011-02-04 21:59:58Z sort $");
            else if (0 == stricmp(id, "sdsfetch"))
            {
                StringAttr branchpath;
                params.read(branchpath);
                Linked<IPropertyTree> sroot = querySDSServer().lockStoreRead();
                try
                {
                    sroot->queryPropTree(branchpath)->serialize(mb);
                }
                catch (...)
                {
                    querySDSServer().unlockStoreRead();
                    throw;
                }
                querySDSServer().unlockStoreRead();
            }
            else if (0 == stricmp(id, "perf"))
            {
                getSystemTraceInfo(buf, PerfMonStandard);
                mb.append(buf.str());
            }
            else if (0 == stricmp(id, "sdssize"))
            {
                StringAttr branchpath;
                params.read(branchpath);
                Linked<IPropertyTree> sroot = querySDSServer().lockStoreRead();
                StringBuffer sbuf;
                try
                {
                    toXML(sroot->queryPropTree(branchpath), sbuf);
                    DBGLOG("sdssize '%s' = %d", branchpath.get(), sbuf.length());
                }
                catch (...)
                {
                    querySDSServer().unlockStoreRead();
                    throw;
                }
                querySDSServer().unlockStoreRead();
                mb.append(sbuf.length());
            }
            else if (0 == stricmp(id, "disconnect"))
            {
                StringAttr client;
                params.read(client);
                SocketEndpoint ep(client);
                PROGLOG("Dalidiag request to close client connection: %s", client.get());
                Owned<INode> node = createINode(ep);
                queryCoven().disconnect(node);
            }
            else if (0 == stricmp(id, "unlock"))
            {
                __int64 connectionId;
                bool disconnect;
                params.read(connectionId);
                params.read(disconnect);
                PROGLOG("Dalidiag request to unlock connection id: %" I64F "x", connectionId);
                StringBuffer connectionInfo;
                bool success = querySDSServer().unlock(connectionId, disconnect, connectionInfo);
                mb.append(success);
                if (success)
                    mb.append(connectionInfo);
            }
            else if (0 == stricmp(id, "save"))
            {
                PROGLOG("Dalidiag requests SDS save");
                querySDSServer().saveRequest();
            }
            else if (0 == stricmp(id, "settracetransactions"))
            {
                PROGLOG("Dalidiag requests Trace Transactions");
                if (traceAllTransactions(true))
                    mb.append("OK - no change");
                else
                    mb.append("OK - transaction tracing enabled");
            }
            else if (0 == stricmp(id, "cleartracetransactions"))
            {
                PROGLOG("Dalidiag requests Trace Transactions stopped");
                if (traceAllTransactions(false))
                    mb.append("OK - transaction tracing disabled");
                else
                    mb.append("OK - no change");
            }
            else if (0 == stricmp(id, "setldapflags"))
            {
                unsigned f;
                params.read(f);
                PROGLOG("Dalidiag requests setldapflags %d", f);
                querySessionManager().setLDAPflags(f);
            }
            else if (0 == stricmp(id, "getldapflags"))
            {
                unsigned f = querySessionManager().getLDAPflags();
                mb.append(f);
            }
            else if (0 == stricmp(id, "setsdsdebug"))
            {
                PROGLOG("Dalidiag setsdsdebug");
                unsigned p;
                params.read(p);
                StringArray arr;
                while (p--)
                {
                    StringAttr s;
                    params.read(s);
                    arr.append(s);
                }
                StringBuffer reply;
                bool success = querySDSServer().setSDSDebug(arr, reply);
                mb.append(success).append(reply);
            }
            else
                mb.append(StringBuffer("UNKNOWN OPTION: ").append(id).str());
        }
        break;
    }
    coven.reply(mb);
}
int run()
{
    if (!started)
    {
        try
        {
            in->start();
            started = true;
        }
        catch (IException *e)
        {
            ActPrintLog(&activity, e, "ThorLookaheadCache starting input");
            startexception.setown(e);
            if (asyncstart)
                notify->onInputStarted(startexception);
            running = false;
            stopped = true;
            startsem.signal();
            return 0;
        }
    }
    try
    {
        StringBuffer temp;
        if (allowspill)
            GetTempName(temp, "lookahd", true);
        assertex(bufsize);
        if (allowspill)
            smartbuf.setown(createSmartBuffer(&activity, temp.toCharArray(), bufsize, queryRowInterfaces(in)));
        else
            smartbuf.setown(createSmartInMemoryBuffer(&activity, queryRowInterfaces(in), bufsize));
        if (notify)
            notify->onInputStarted(NULL);
        startsem.signal();
        Linked<IRowWriter> writer = smartbuf->queryWriter();
        if (preserveLhsGrouping)
        {
            while (required && running)
            {
                OwnedConstThorRow row = in->nextRow();
                if (!row)
                {
                    row.setown(in->nextRow());
                    if (!row)
                        break;
                    else
                        writer->putRow(NULL); // eog
                }
                ++count;
                writer->putRow(row.getClear());
                if (required != RCUNBOUND)
                    required--;
            }
        }
        else
        {
            while (required && running)
            {
                OwnedConstThorRow row = in->ungroupedNextRow();
                if (!row)
                    break;
                ++count;
                writer->putRow(row.getClear());
                if (required != RCUNBOUND)
                    required--;
            }
        }
    }
    catch (IException *e)
    {
        ActPrintLog(&activity, e, "ThorLookaheadCache get exception");
        getexception.setown(e);
    }
    if (notify)
        notify->onInputFinished(count);
    if (smartbuf)
        smartbuf->queryWriter()->flush();
    running = false;
    try
    {
        if (in)
            in->stop();
    }
    catch (IException *e)
    {
        ActPrintLog(&activity, e, "ThorLookaheadCache stop exception");
        if (!getexception.get())
            getexception.setown(e);
    }
    return 0;
}
unsigned __int64 queryTotalCycles() const { return in->queryTotalCycles(); }
CActivityBase *queryFromActivity() { return in->queryFromActivity(); }
void getMetaInfo(ThorDataLinkMetaInfo &info)
{
    memset(&info, 0, sizeof(info));
    in->getMetaInfo(info);
    // more TBD
}
void Interstitial::evaluate(const ISO& iso, const Symmetry& symmetry, int numPointsPerAtom, double tol, double scale)
{
    // Clear space
    clear();

    // Output
    Output::newline();
    Output::print("Searching for interstitial sites using ");
    Output::print(numPointsPerAtom);
    Output::print(" starting point");
    if (numPointsPerAtom != 1)
        Output::print("s");
    Output::print(" per atom and a scale of ");
    Output::print(scale);
    Output::increase();

    // Constants used in generating points on sphere around each atom
    double phiScale = Constants::pi * (3 - sqrt(5));
    double yScale = 2.0 / numPointsPerAtom;

    // Loop over unique atoms in the structure
    int i, j, k;
    int count = 0;
    double y;
    double r;
    double phi;
    double curDistance;
    double nearDistance;
    double startDistance;
    Vector3D curPoint;
    Linked<Vector3D> points;
    for (i = 0; i < symmetry.orbits().length(); ++i)
    {
        // Get the distance to the nearest atom in the structure
        nearDistance = -1;
        for (j = 0; j < iso.atoms().length(); ++j)
        {
            for (k = 0; k < iso.atoms()[j].length(); ++k)
            {
                curDistance = iso.basis().distance(symmetry.orbits()[i].atoms()[0]->fractional(), FRACTIONAL,
                                                   iso.atoms()[j][k].fractional(), FRACTIONAL);
                if (curDistance > 0)
                {
                    if ((nearDistance == -1) || (curDistance < nearDistance))
                        nearDistance = curDistance;
                }
            }
        }

        // Set the starting distance away from atom
        startDistance = nearDistance / 2;

        // Loop over starting points
        for (j = 0; j < numPointsPerAtom; ++j)
        {
            // Check if running current point on current processor
            if ((++count + Multi::rank()) % Multi::worldSize() == 0)
            {
                // Get current starting point
                y = j * yScale - 1 + (yScale / 2);
                r = sqrt(1 - y*y);
                phi = j * phiScale;
                curPoint.set(symmetry.orbits()[i].atoms()[0]->cartesian()[0] + startDistance*r*cos(phi),
                             symmetry.orbits()[i].atoms()[0]->cartesian()[1] + startDistance*y,
                             symmetry.orbits()[i].atoms()[0]->cartesian()[2] + startDistance*r*sin(phi));

                // Minimize the current point
                if (!minimizePoint(curPoint, iso, scale))
                    continue;

                // Save current point in fractional coordinates
                points += iso.basis().getFractional(curPoint);
                ISO::moveIntoCell(*points.last());
            }
        }
    }

    // Reduce list of points to unique ones
    int m;
    bool found;
    int numLoops;
    Vector3D rotPoint;
    Vector3D equivPoint;
    Vector3D origin(0.0);
    Linked<double> distances;
    Linked<double>::iterator itDist;
    Linked<Vector3D> uniquePoints;
    Linked<Vector3D>::iterator it;
    Linked<Vector3D>::iterator itUnique;
    for (i = 0; i < Multi::worldSize(); ++i)
    {
        // Send number of points in list on current processor
        numLoops = points.length();
        Multi::broadcast(numLoops, i);

        // Loop over points
        if (i == Multi::rank())
            it = points.begin();
        for (j = 0; j < numLoops; ++j)
        {
            // Send out current point
            if (i == Multi::rank())
            {
                curPoint = *it;
                ++it;
            }
            Multi::broadcast(curPoint, i);

            // Get current distance to origin
            curDistance = iso.basis().distance(curPoint, FRACTIONAL, origin, FRACTIONAL);

            // Loop over points that were already saved
            found = false;
            itDist = distances.begin();
            itUnique = uniquePoints.begin();
            for (; itDist != distances.end(); ++itDist, ++itUnique)
            {
                // Check whether the points are the same
                if (Num<double>::abs(curDistance - *itDist) <= tol)
                {
                    if (iso.basis().distance(curPoint, FRACTIONAL, *itUnique, FRACTIONAL) <= tol)
                    {
                        found = true;
                        break;
                    }
                }

                // Loop over symmetry operations
                for (k = 0; k < symmetry.operations().length(); ++k)
                {
                    // Loop over translations
                    rotPoint = symmetry.operations()[k].rotation() * curPoint;
                    for (m = 0; m < symmetry.operations()[k].translations().length(); ++m)
                    {
                        // Check if points are the same
                        equivPoint = rotPoint;
                        equivPoint += symmetry.operations()[k].translations()[m];
                        if (iso.basis().distance(equivPoint, FRACTIONAL, *itUnique, FRACTIONAL) <= tol)
                        {
                            found = true;
                            break;
                        }
                    }
                    if (found)
                        break;
                }
                if (found)
                    break;
            }

            // Found a new point
            if (!found)
            {
                distances += curDistance;
                uniquePoints += curPoint;
            }
        }
    }

    // Save unique points
    _sites.length(uniquePoints.length());
    for (i = 0, it = uniquePoints.begin(); it != uniquePoints.end(); ++i, ++it)
        _sites[i] = *it;

    // Output
    Output::newline();
    Output::print("Found ");
    Output::print(_sites.length());
    Output::print(" possible interstitial site");
    if (_sites.length() != 1)
        Output::print("s");
    Output::increase();
    for (i = 0; i < _sites.length(); ++i)
    {
        Output::newline();
        Output::print("Site ");
        Output::print(i+1);
        Output::print(":");
        for (j = 0; j < 3; ++j)
        {
            Output::print(" ");
            Output::print(_sites[i][j], 8);
        }
    }
    Output::decrease();

    // Output
    Output::decrease();
}
//MORE: Really this should create no_selects for the sub records, but pass on that for the moment.
void DataSourceMetaData::gatherFields(IHqlExpression * expr, bool isConditional, bool *pMixedContent)
{
    switch (expr->getOperator())
    {
    case no_record:
        gatherChildFields(expr, isConditional, pMixedContent);
        break;
    case no_ifblock:
        {
            OwnedITypeInfo boolType = makeBoolType();
            OwnedITypeInfo voidType = makeVoidType();
            isStoredFixedWidth = false;
            fields.append(*new DataSourceMetaItem(FVFFbeginif, NULL, NULL, boolType));
            gatherChildFields(expr->queryChild(1), true, pMixedContent);
            fields.append(*new DataSourceMetaItem(FVFFendif, NULL, NULL, voidType));
            break;
        }
    case no_field:
        {
            if (expr->hasAttribute(__ifblockAtom))
                break;
            Linked<ITypeInfo> type = expr->queryType();
            IAtom * name = expr->queryName();
            IHqlExpression * nameAttr = expr->queryAttribute(namedAtom);
            StringBuffer outname;
            if (nameAttr && nameAttr->queryChild(0)->queryValue())
                nameAttr->queryChild(0)->queryValue()->getStringValue(outname);
            else
                outname.append(name).toLowerCase();

            StringBuffer xpathtext;
            const char * xpath = NULL;
            IHqlExpression * xpathAttr = expr->queryAttribute(xpathAtom);
            if (xpathAttr && xpathAttr->queryChild(0)->queryValue())
                xpath = xpathAttr->queryChild(0)->queryValue()->getStringValue(xpathtext);

            unsigned flag = FVFFnone;
            if (isKey() && expr->hasAttribute(blobAtom))
            {
                type.setown(makeIntType(8, false));
                flag = FVFFblob;
            }
            type_t tc = type->getTypeCode();
            if (tc == type_row)
            {
                OwnedITypeInfo voidType = makeVoidType();
                Owned<DataSourceMetaItem> begin = new DataSourceMetaItem(FVFFbeginrecord, outname, xpath, voidType);
                //inherit mixed content from child row with xpath('')
                bool *pItemMixedContent = (pMixedContent && xpath && !*xpath) ? pMixedContent : &(begin->hasMixedContent);
                fields.append(*begin.getClear());
                gatherChildFields(expr->queryRecord(), isConditional, pItemMixedContent);
                fields.append(*new DataSourceMetaItem(FVFFendrecord, outname, xpath, voidType));
            }
            else if ((tc == type_dictionary) || (tc == type_table) || (tc == type_groupedtable))
            {
                isStoredFixedWidth = false;
                Owned<DataSourceDatasetItem> ds = new DataSourceDatasetItem(outname, xpath, expr);
                if (pMixedContent && xpath && !*xpath)
                    *pMixedContent = ds->queryChildMeta()->hasMixedContent;
                fields.append(*ds.getClear());
            }
            else if (tc == type_set)
            {
                isStoredFixedWidth = false;
                if (pMixedContent && xpath && !*xpath)
                    *pMixedContent = true;
                fields.append(*new DataSourceSetItem(outname, xpath, type));
            }
            else
            {
                if (type->getTypeCode() == type_alien)
                {
                    IHqlAlienTypeInfo * alien = queryAlienType(type);
                    type.set(alien->queryPhysicalType());
                }
                if (pMixedContent && xpath && !*xpath)
                    *pMixedContent = true;
                addSimpleField(outname, xpath, type, flag);
            }
            break;
        }
    }
}
void Interstitial::voronoi(const ISO& iso, const Symmetry& symmetry, double tol)
{
    // Clear space
    clear();

    // Output
    Output::newline();
    Output::print("Searching for interstitial sites using Voronoi method");
    Output::increase();

    // Set up image iterator
    ImageIterator images;
    images.setCell(iso.basis(), 12);

    // Loop over unique atoms in the structure
    int i, j, k;
    List<double> weights;
    OList<Vector3D> points;
    OList<Vector3D> vertices;
    Linked<Vector3D> intPoints;
    for (i = 0; i < symmetry.orbits().length(); ++i)
    {
        // Reset variables
        weights.length(0);
        points.length(0);
        vertices.length(0);

        // Loop over atoms in the structure
        for (j = 0; j < iso.atoms().length(); ++j)
        {
            for (k = 0; k < iso.atoms()[j].length(); ++k)
            {
                // Loop over images
                images.reset(symmetry.orbits()[i].atoms()[0]->fractional(), iso.atoms()[j][k].fractional());
                while (!images.finished())
                {
                    // Skip if atoms are the same
                    if (++images < 1e-8)
                        continue;

                    // Save current point
                    points += symmetry.orbits()[i].atoms()[0]->cartesian() + images.cartVector();
                    weights += 0.5;
                }
            }
        }

        // Calculate Voronoi volume
        symmetry.orbits()[i].atoms()[0]->cartesian().voronoi(points, weights, tol, &vertices);

        // Save points
        for (j = 0; j < vertices.length(); ++j)
        {
            intPoints += iso.basis().getFractional(vertices[j]);
            ISO::moveIntoCell(*intPoints.last());
        }
    }

    // Reduce points to unique ones
    bool found;
    double curDistance;
    Vector3D rotPoint;
    Vector3D equivPoint;
    Vector3D origin(0.0);
    Linked<double> distances;
    Linked<double>::iterator itDist;
    Linked<Vector3D>::iterator it;
    Linked<Vector3D> uniquePoints;
    Linked<Vector3D>::iterator itUnique;
    for (it = intPoints.begin(); it != intPoints.end(); ++it)
    {
        // Get current distance to origin
        curDistance = iso.basis().distance(*it, FRACTIONAL, origin, FRACTIONAL);

        // Loop over points that were already saved
        found = false;
        itDist = distances.begin();
        itUnique = uniquePoints.begin();
        for (; itDist != distances.end(); ++itDist, ++itUnique)
        {
            // Check whether the points are the same
            if (Num<double>::abs(curDistance - *itDist) <= tol)
            {
                if (iso.basis().distance(*it, FRACTIONAL, *itUnique, FRACTIONAL) <= tol)
                {
                    found = true;
                    break;
                }
            }

            // Loop over symmetry operations
            for (i = 0; i < symmetry.operations().length(); ++i)
            {
                // Loop over translations
                rotPoint = symmetry.operations()[i].rotation() * *it;
                for (j = 0; j < symmetry.operations()[i].translations().length(); ++j)
                {
                    // Check if points are the same
                    equivPoint = rotPoint;
                    equivPoint += symmetry.operations()[i].translations()[j];
                    if (iso.basis().distance(equivPoint, FRACTIONAL, *itUnique, FRACTIONAL) <= tol)
                    {
                        found = true;
                        break;
                    }
                }
                if (found)
                    break;
            }
            if (found)
                break;
        }

        // Found a new point
        if (!found)
        {
            distances += curDistance;
            uniquePoints += *it;
        }
    }

    // Save unique points
    _sites.length(uniquePoints.length());
    for (i = 0, it = uniquePoints.begin(); it != uniquePoints.end(); ++i, ++it)
        _sites[i] = *it;

    // Output
    Output::newline();
    Output::print("Found ");
    Output::print(_sites.length());
    Output::print(" possible interstitial site");
    if (_sites.length() != 1)
        Output::print("s");
    Output::increase();
    for (i = 0; i < _sites.length(); ++i)
    {
        Output::newline();
        Output::print("Site ");
        Output::print(i+1);
        Output::print(":");
        for (j = 0; j < 3; ++j)
        {
            Output::print(" ");
            Output::print(_sites[i][j], 8);
        }
    }
    Output::decrease();

    // Output
    Output::decrease();
}
//MORE: Really this should create no_selects for the sub records, but pass on that for the moment.
void DataSourceMetaData::gatherFields(IHqlExpression * expr, bool isConditional)
{
    switch (expr->getOperator())
    {
    case no_record:
        gatherChildFields(expr, isConditional);
        break;
    case no_ifblock:
        {
            OwnedITypeInfo boolType = makeBoolType();
            OwnedITypeInfo voidType = makeVoidType();
            isStoredFixedWidth = false;
            fields.append(*new DataSourceMetaItem(FVFFbeginif, NULL, NULL, boolType));
            gatherChildFields(expr->queryChild(1), true);
            fields.append(*new DataSourceMetaItem(FVFFendif, NULL, NULL, voidType));
            break;
        }
    case no_field:
        {
            if (expr->hasProperty(__ifblockAtom))
                break;
            Linked<ITypeInfo> type = expr->queryType();
            IAtom * name = expr->queryName();
            IHqlExpression * nameAttr = expr->queryProperty(namedAtom);
            StringBuffer outname;
            if (nameAttr && nameAttr->queryChild(0)->queryValue())
                nameAttr->queryChild(0)->queryValue()->getStringValue(outname);
            else
                outname.append(name).toLowerCase();

            StringBuffer xpathtext;
            const char * xpath = NULL;
            IHqlExpression * xpathAttr = expr->queryProperty(xpathAtom);
            if (xpathAttr && xpathAttr->queryChild(0)->queryValue())
                xpath = xpathAttr->queryChild(0)->queryValue()->getStringValue(xpathtext);

            if (isKey() && expr->hasProperty(blobAtom))
                type.setown(makeIntType(8, false));
            type_t tc = type->getTypeCode();
            if (tc == type_row)
            {
                OwnedITypeInfo voidType = makeVoidType();
                fields.append(*new DataSourceMetaItem(FVFFbeginrecord, outname, xpath, voidType));
                gatherChildFields(expr->queryRecord(), isConditional);
                fields.append(*new DataSourceMetaItem(FVFFendrecord, outname, xpath, voidType));
            }
            else if ((tc == type_table) || (tc == type_groupedtable))
            {
                isStoredFixedWidth = false;
                fields.append(*new DataSourceDatasetItem(outname, xpath, expr));
            }
            else if (tc == type_set)
            {
                isStoredFixedWidth = false;
                fields.append(*new DataSourceSetItem(outname, xpath, type));
            }
            else
            {
                if (type->getTypeCode() == type_alien)
                {
                    IHqlAlienTypeInfo * alien = queryAlienType(type);
                    type.set(alien->queryPhysicalType());
                }
                addSimpleField(outname, xpath, type);
            }
            break;
        }
    }
}
virtual void process() override
{
    ActPrintLog("INDEXWRITE: Start");
    init();
    IRowStream *stream = inputStream;
    ThorDataLinkMetaInfo info;
    input->getMetaInfo(info);
    outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
    start();
    if (refactor)
    {
        assertex(isLocal);
        if (active)
        {
            unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
            assertex(0 == container.queryJob().querySlaves() % targetWidth);
            unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
            unsigned myPart = queryJobChannel().queryMyRank();

            IArrayOf<IRowStream> streams;
            streams.append(*LINK(stream));
            --partsPerNode;

            // Should this be merging 1,11,21,31 etc.
            unsigned p=0;
            unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
            for (; p<partsPerNode; p++)
            {
                streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
            }
            ICompare *icompare = helper->queryCompare();
            assertex(icompare);
            Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
            myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
            stream = myInputStream;
        }
        else // serve nodes, creating merged parts
            rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
    }
    processed = THORDATALINK_STARTED;

    // single part key support
    // has to serially pull all data from nodes 2-N
    // nodes 2-N could/should start pushing some data (as it's supposed to be small) to cut down on the serial nature.
    unsigned node = queryJobChannel().queryMyRank();
    if (singlePartKey)
    {
        if (1 == node)
        {
            try
            {
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                for (;;)
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    if (abortSoon)
                        return;
                    processRow(row);
                }
                unsigned node = 2;
                while (node <= container.queryJob().querySlaves())
                {
                    Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                    CMessageBuffer mb;
                    Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                    CThorStreamDeserializerSource rowSource;
                    rowSource.setStream(stream);
                    bool successSR;
                    for (;;)
                    {
                        {
                            BooleanOnOff tf(receivingTag2);
                            successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                        }
                        if (successSR)
                        {
                            if (rowSource.eos())
                                break;
                            Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                            do
                            {
                                RtlDynamicRowBuilder rowBuilder(allocator);
                                size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                processRow(fRow);
                            }
                            while (!rowSource.eos());
                        }
                    }
                    node++;
                }
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, true);
                throw;
            }
            close(*partDesc, partCrc, true);
            stop();
        }
        else
        {
            CMessageBuffer mb;
            CMemoryRowSerializer mbs(mb);
            Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
            for (;;)
            {
                BooleanOnOff tf(receivingTag2);
                if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                {
                    if (abortSoon)
                        break;
                    mb.clear();
                    do
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        serializer->serialize(mbs, (const byte *)row.get());
                    }
                    while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                    if (!queryJobChannel().queryJobComm().reply(mb))
                        throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                    if (0 == mb.length())
                        break;
                }
            }
        }
    }
    else
    {
        if (!refactor || active)
        {
            try
            {
                StringBuffer partFname;
                getPartFilename(*partDesc, 0, partFname);
                ActPrintLog("INDEXWRITE: process: handling fname : %s", partFname.str());
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                ActPrintLog("INDEXWRITE: write");
                BooleanOnOff tf(receiving);
                if (!refactor || !active)
                    receiving = false;
                do
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    processRow(row);
                }
                while (!abortSoon);
                ActPrintLog("INDEXWRITE: write level 0 complete");
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
                throw;
            }
            close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node);
            stop();

            ActPrintLog("INDEXWRITE: Wrote %" RCPF "d records", processed & THORDATALINK_COUNT_MASK);

            if (buildTlk)
            {
                ActPrintLog("INDEXWRITE: sending rows");
                NodeInfoArray tlkRows;
                CMessageBuffer msg;
                if (firstNode())
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        if (enableTlkPart0)
                            tlkRows.append(*new CNodeInfo(0, firstRow.get(), firstRowSize, totalCount));
                        tlkRows.append(*new CNodeInfo(1, lastRow.get(), lastRowSize, totalCount));
                    }
                }
                else
                {
                    if (processed & THORDATALINK_COUNT_MASK)
                    {
                        CNodeInfo row(queryJobChannel().queryMyRank(), lastRow.get(), lastRowSize, totalCount);
                        row.serialize(msg);
                    }
                    queryJobChannel().queryJobComm().send(msg, 1, mpTag);
                }
                if (firstNode())
                {
                    ActPrintLog("INDEXWRITE: Waiting on tlk to complete");

                    // JCSMORE if refactor==true, is rowsToReceive here right??
                    unsigned rowsToReceive = (refactor ? (tlkDesc->queryOwner().numParts()-1) : container.queryJob().querySlaves()) - 1; // -1 'cos got my own in array already
                    ActPrintLog("INDEXWRITE: will wait for info from %d slaves before writing TLK", rowsToReceive);
                    while (rowsToReceive--)
                    {
                        msg.clear();
                        receiveMsg(msg, RANK_ALL, mpTag); // NH->JCS RANK_ALL_OTHER not supported for recv
                        if (abortSoon)
                            return;
                        if (msg.length())
                        {
                            CNodeInfo *ni = new CNodeInfo();
                            ni->deserialize(msg);
                            tlkRows.append(*ni);
                        }
                    }
                    tlkRows.sort(CNodeInfo::compare);

                    StringBuffer path;
                    getPartFilename(*tlkDesc, 0, path);
                    ActPrintLog("INDEXWRITE: creating toplevel key file : %s", path.str());
                    try
                    {
                        open(*tlkDesc, true, helper->queryDiskRecordSize()->isVariableSize());
                        if (tlkRows.length())
                        {
                            CNodeInfo &lastNode = tlkRows.item(tlkRows.length()-1);
                            memset(lastNode.value, 0xff, lastNode.size);
                        }
                        ForEachItemIn(idx, tlkRows)
                        {
                            CNodeInfo &info = tlkRows.item(idx);
                            builder->processKeyData((char *)info.value, info.pos, info.size);
                        }
                        close(*tlkDesc, tlkCrc, true);
                    }
                    catch (CATCHALL)
                    {
                        abortSoon = true;
                        close(*tlkDesc, tlkCrc, true);
                        removeFiles(*partDesc);
                        throw;
                    }
                }
            }
            else if (!isLocal && firstNode())
template <class T>
void PrintLink(Linked<T> &link)
{
    for (link.Reset(); !link.EndOfList(); link.Next())
        std::cout << link.Data() << std::endl;
}
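// Hypothetical usage of PrintLink, assuming the cursor-style Linked<T> API used
// in main() earlier (InsertAfter to add, Reset/EndOfList/Next/Data to iterate);
// the header name is an assumption:
#include <iostream>
#include "linked.h" // assumed project header providing Linked<T> and String

int main()
{
    Linked<String> words;
    String a = "hello";
    String b = "world";
    words.InsertAfter(a);
    words.InsertAfter(b);
    PrintLink(words); // prints each stored element on its own line
    return 0;
}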