bool doSwap(const char *oldip, const char *newip) { Owned<INode> newNode = createINode(newip); Owned<INode> oldNode = createINode(oldip); if (!group->isMember(oldNode)) { ERRLOG("Node %s is not part of group %s", oldip, groupName.get()); return false; } if (group->isMember(newNode)) { ERRLOG("Node %s is already part of group %s", newip, groupName.get()); return false; } queryNamedGroupStore().swapNode(oldNode->endpoint(),newNode->endpoint()); return true; }
// Open a file-view data source on a remote server.
// Sends an FVCMDcreatefile request (big-endian wire format) carrying the
// caller's process session, credentials and the logical file name, then
// decodes the reply.  Returns a new RemoteDataSource on success, or NULL
// when the server replied with a zero id (the open failed remotely).
// NB: the reply fields must be read in exactly this order - it mirrors the
// order the server serialized them.
IFvDataSource * createRemoteFileDataSource(const SocketEndpoint & server, const char * username, const char * password, const char * logicalName)
{
    Owned<INode> serverNode = createINode(server);
    CMessageBuffer msg;
    msg.setEndian(__BIG_ENDIAN);            // wire protocol is big-endian
    msg.append((byte)FVCMDcreatefile);
    msg.append(myProcessSession());
    msg.append(username);
    msg.append(password);
    msg.append(logicalName);
    sendReceive(serverNode, msg);           // blocking request/response; reply replaces msg contents
    unsigned short version;
    unique_id_t id;
    __int64 numRows;
    bool isIndex;
    msg.read(version);                      // NOTE(review): version is read but not validated here - confirm it is checked elsewhere
    msg.read(id);
    msg.read(numRows);
    Owned<IFvDataSourceMetaData> meta = deserializeDataSourceMeta(msg);
    msg.read(isIndex);
    if (id)
        return new RemoteDataSource(server, id, meta, numRows, isIndex);
    return 0;                               // id==0 => server could not open the file
}
// Construct a client-side proxy for a remote data source.
// Caches the row count and index flag reported by the server, links the
// supplied meta data (caller keeps its own reference) and builds an INode
// for the server endpoint.
RemoteDataSource::RemoteDataSource(const SocketEndpoint & _serverEP, unique_id_t _id, IFvDataSourceMetaData * _metaData, __int64 _cachedNumRows, bool _isIndex) : serverEP(_serverEP)
{
    openCount = 0;                              // nothing opened yet
    id = _id;
    cachedNumRows = _cachedNumRows;
    index = _isIndex;
    metaData.set(_metaData);                    // set() links rather than taking ownership
    serverNode.setown(createINode(serverEP));
}
int mksfs(int fresh) { int diskinit; //Create a new filesystem if the fresh variable is set if(fresh==1){ diskinit = init_fresh_disk(FILESYSTEMNAME, BLOCKSIZE, NUMBEROFBLOCKS); if(diskinit != 0){ //perror("Error creating disk"); exit(-1); } else { //To keep the root directory in memory initDirectory(); //Initialize SuperBlock mySuperBlock.magic = MAGIC; mySuperBlock.blockSize = BLOCKSIZE; mySuperBlock.fileSystemSize = NUMBEROFBLOCKS; mySuperBlock.iNodeTableLength = NUMBEROFINODES; mySuperBlock.rootINode = 0; //Use a block sized buffer unsigned char buffer[512]; memset(buffer,255,512); memcpy(buffer,&mySuperBlock,512); write_blocks(0,1,buffer); initializeFreeBitMap(); initializeFDTable(); //Now for the Inodes: initialize the first one: (for root dir) int numRootINode = createINode(RWE, 1, ROOT_UID, ROOT_GID, 1); readDirectory(); return 0; } } //if the free variable is not set, load else if (fresh==0) { diskinit = init_disk(FILESYSTEMNAME, BLOCKSIZE, NUMBEROFBLOCKS); if(diskinit != 0){ //perror("Error reading disc"); exit(-1); } else { return 0; } } return -1; }
/**
 * sfs_fopen - open the named file, creating it if it does not exist.
 *
 * Looks the name up in the directory; on a miss, allocates a new inode with
 * user permissions and adds a directory entry for it.  Either way the inode
 * is then given a slot in the open-file-descriptor table.
 *
 * @param name  file name to open.
 * @return the file descriptor returned by addToFDTable().
 */
int sfs_fopen(char *name)
{
    int iNode;
    printf("Opening file");
    iNode = findDirectoryEntry(name);
    if (iNode < 0)
    {
        /* Not found: create a fresh inode and a directory entry for it.
         * NOTE(review): createINode's failure convention is not visible here -
         * confirm it cannot return a negative value before use. */
        iNode = createINode(RWE, 0, USER_UID, USER_GID, 0);
        addToDirectory(name, iNode);
    }
    /* Both the hit and miss paths finish identically (the original duplicated
     * this call in each branch): allocate an FD table slot for the inode. */
    return addToFDTable(iNode);
}
int main(int argc, char* argv[]) { InitModuleObjects(); if (argc != 3 && argc != 4) { usage(); return 0; } int nTimeOut = (argc == 4 ? atoi(argv[3]) : nDefaultTimeOut); startMPServer(0); Owned<ISashaCommand> cmd = createSashaCommand(); cmd->setAction(SCA_null); SocketEndpoint ep(argv[1], atoi(argv[2])); Owned<INode> node = createINode(ep); try { if (cmd->send(node, nTimeOut) == false) { return critical(argv[1], argv[2]); } else { return ok(); } } catch (...) { return critical(argv[1], argv[2]); } stopMPServer(); return 0; }
// Coven-side dispatcher for session-manager requests.
// Reads the function code from the message buffer, services the request and
// replies on the same buffer.  Each case must deserialize its fields in
// exactly the order the client serialized them.
void processMessage(CMessageBuffer &mb)
{
    ICoven &coven=queryCoven();
    SessionId id;
    int fn;
    mb.read(fn);
    switch (fn) {
    case MSR_REGISTER_PROCESS_SESSION:
        {
            // wait+signal leaves the semaphore count unchanged: this simply
            // blocks until connections are being accepted.
            acceptConnections.wait();
            acceptConnections.signal();
            Owned<INode> node(deserializeINode(mb));
            Owned<INode> servernode(deserializeINode(mb));  // hopefully me, but not if forwarded
            int role=0;
            if (mb.length()-mb.getPos()>=sizeof(role)) { // a capability block present
                mb.read(role);
                if (!manager.authorizeConnection(role,false)) {
                    // Unauthorized: send an empty reply, stall briefly, then drop the connection.
                    SocketEndpoint sender = mb.getSender();
                    mb.clear();
                    coven.reply(mb);
                    MilliSleep(100+getRandom()%1000); // Causes client to 'work' for a short time.
                    Owned<INode> node = createINode(sender);
                    coven.disconnect(node);
                    break;
                }
#ifdef _DEBUG
                StringBuffer eps;
                PROGLOG("Connection to %s authorized",mb.getSender().getUrlStr(eps).str());
#endif
            }
            IGroup *covengrp;
            id = manager.registerClientProcess(node.get(),covengrp,(DaliClientRole)role);
            mb.clear().append(id);
            if (covengrp->rank(servernode)==RANK_NULL) { // must have been redirected
                covengrp->Release(); // no good, so just use one we know about (may use something more sophisticated later)
                INode *na = servernode.get();
                covengrp = createIGroup(1, &na);
            }
            covengrp->serialize(mb);
            covengrp->Release();
            coven.reply(mb);    // reply carries: session id + serialized coven group
        }
        break;
    case MSR_SECONDARY_REGISTER_PROCESS_SESSION:
        {
            mb.read(id);
            Owned<INode> node (deserializeINode(mb));
            int role;
            mb.read(role);
            manager.addProcessSession(id,node.get(),(DaliClientRole)role);
            mb.clear();
            coven.reply(mb);    // empty reply == acknowledgement
        }
        break;
    case MSR_REGISTER_SESSION:
        {
            SecurityToken tok;
            SessionId parentid;
            mb.read(tok).read(parentid);
            SessionId id = manager.registerSession(tok,parentid);   // NB: deliberately shadows the outer 'id'
            mb.clear().append(id);
            coven.reply(mb);
        }
        break;
    case MSR_SECONDARY_REGISTER_SESSION:
        {
            mb.read(id);
            manager.addSession(id);
            mb.clear();
            coven.reply(mb);
        }
        break;
    case MSR_LOOKUP_PROCESS_SESSION:
        { // looks up from node or from id
            Owned<INode> node (deserializeINode(mb));
            if (node->endpoint().isNull()&&(mb.length()-mb.getPos()>=sizeof(id))) {
                // Null endpoint: the caller supplied a session id and wants the node back.
                mb.read(id);
                INode *n = manager.getProcessSessionNode(id);
                if (n)
                    node.setown(n);
                node->serialize(mb.clear());
            }
            else {
                // Otherwise map the supplied node to its session id.
                id = manager.lookupProcessSession(node.get());
                mb.clear().append(id);
            }
            coven.reply(mb);
        }
        break;
    case MSR_STOP_SESSION:
        {
            SessionId sessid;
            bool failed;
            mb.read(sessid).read(failed);
            manager.stopSession(sessid,failed);
            mb.clear();
            coven.reply(mb);
        }
        break;
    case MSR_LOOKUP_LDAP_PERMISSIONS:
        {
            StringAttr key;
            StringAttr obj;
            Owned<IUserDescriptor> udesc=createUserDescriptor();
            StringAttr username;        // NOTE(review): username/passwordenc are declared but never read from the message
            StringAttr passwordenc;
            mb.read(key).read(obj);
            udesc->deserialize(mb);
#ifndef _NO_DALIUSER_STACKTRACE
            //following debug code to be removed
            StringBuffer sb;
            udesc->getUserName(sb);
            if (0==sb.length())
            {
                DBGLOG("UNEXPECTED USER (NULL) in dasess.cpp CSessionRequestServer::processMessage() line %d", __LINE__);
            }
#endif
            unsigned auditflags = 0;
            if (mb.length()-mb.getPos()>=sizeof(auditflags))    // optional trailing field
                mb.read(auditflags);
            int err = 0;
            int ret=manager.getPermissionsLDAP(key,obj,udesc,auditflags,&err);
            mb.clear().append(ret);
            if (err)
                mb.append(err);     // error code appended only when non-zero
            coven.reply(mb);
        }
        break;
    }
}
static void test2() { const size32_t recsize = 17; printf("Test DFS\n"); StringBuffer s; unsigned i; unsigned n; unsigned t; queryNamedGroupStore().remove("daregress_group"); queryDistributedFileDirectory().removeEntry("daregress::superfile1"); SocketEndpointArray epa; for (n=0;n<400;n++) { s.clear().append("192.168.").append(n/256).append('.').append(n%256); SocketEndpoint ep(s.str()); epa.append(ep); } Owned<IGroup> group = createIGroup(epa); queryNamedGroupStore().add("daregress_group",group,true); if (!queryNamedGroupStore().find(group,s.clear())) ERROR("Created logical group not found"); if (stricmp(s.str(),"daregress_group")!=0) ERROR("Created logical group found with wrong name"); group.setown(queryNamedGroupStore().lookup("daregress_group")); if (!group) ERROR("named group lookup failed"); printf("Named group created - 400 nodes\n"); for (i=0;i<100;i++) { Owned<IPropertyTree> pp = createPTree("Part"); Owned<IFileDescriptor>fdesc = createFileDescriptor(); fdesc->setDefaultDir("c:\\thordata\\regress"); n = 9; for (unsigned k=0;k<400;k++) { s.clear().append("192.168.").append(n/256).append('.').append(n%256); Owned<INode> node = createINode(s.str()); pp->setPropInt64("@size",(n*777+i)*recsize); s.clear().append("daregress_test").append(i).append("._").append(n+1).append("_of_400"); fdesc->setPart(n,node,s.str(),pp); n = (n+9)%400; } fdesc->queryProperties().setPropInt("@recordSize",17); s.clear().append("daregress::test").append(i); queryDistributedFileDirectory().removeEntry(s.str()); StringBuffer cname; Owned<IDistributedFile> dfile = queryDistributedFileDirectory().createNew(fdesc); if (stricmp(dfile->getClusterName(0,cname),"daregress_group")!=0) ERROR1("Cluster name wrong %d",i); s.clear().append("daregress::test").append(i); dfile->attach(s.str()); } printf("DFile create done - 100 files\n"); unsigned samples = 5; t = 33; for (i=0;i<100;i++) { s.clear().append("daregress::test").append(t); if (!queryDistributedFileDirectory().exists(s.str())) 
ERROR1("Could not find %s",s.str()); Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str()); if (!dfile) { ERROR1("Could not find %s",s.str()); continue; } offset_t totsz = 0; n = 11; for (unsigned k=0;k<400;k++) { Owned<IDistributedFilePart> part = dfile->getPart(n); if (!part) { ERROR2("part not found %d %d",t,n); continue; } s.clear().append("192.168.").append(n/256).append('.').append(n%256); Owned<INode> node = createINode(s.str()); if (!node->equals(part->queryNode())) ERROR2("part node mismatch %d, %d",t,n); if (part->getFileSize(false,false)!=(n*777+t)*recsize) ERROR4("size node mismatch %d, %d, %d, %d",t,n,(unsigned)part->getFileSize(false,false),(n*777+t)*recsize); s.clear().append("daregress_test").append(t).append("._").append(n+1).append("_of_400"); /* ** TBD if (stricmp(s.str(),part->queryPartName())!=0) ERROR4("part name mismatch %d, %d '%s' '%s'",t,n,s.str(),part->queryPartName()); */ totsz += (n*777+t)*recsize; if ((samples>0)&&(i+n+t==k)) { samples--; RemoteFilename rfn; part->getFilename(rfn,samples%2); StringBuffer fn; rfn.getRemotePath(fn); printf("SAMPLE: %d,%d %s\n",t,n,fn.str()); } n = (n+11)%400; } if (totsz!=dfile->getFileSize(false,false)) ERROR1("total size mismatch %d",t); t = (t+33)%100; } printf("DFile lookup done - 100 files\n"); // check iteration __int64 crctot = 0; unsigned np = 0; unsigned totrows = 0; Owned<IDistributedFileIterator> fiter = queryDistributedFileDirectory().getIterator("daregress::*",false); Owned<IDistributedFilePartIterator> piter; ForEach(*fiter) { piter.setown(fiter->query().getIterator()); ForEach(*piter) { RemoteFilename rfn; StringBuffer s; piter->query().getFilename(rfn,0); rfn.getRemotePath(s); piter->query().getFilename(rfn,1); rfn.getRemotePath(s); crctot += crc32(s.str(),s.length(),0); np++; totrows += (unsigned)(piter->query().getFileSize(false,false)/fiter->query().queryProperties().getPropInt("@recordSize",-1)); } } piter.clear(); fiter.clear(); printf("DFile iterate done - 
%d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot); Owned<IDistributedSuperFile> sfile; sfile.setown(queryDistributedFileDirectory().createSuperFile("daregress::superfile1",true)); for (i = 0;i<100;i++) { s.clear().append("daregress::test").append(i); sfile->addSubFile(s.str()); } sfile.clear(); sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1")); if (!sfile) { ERROR("Could not find added superfile"); return; } __int64 savcrc = crctot; crctot = 0; np = 0; totrows = 0; size32_t srs = (size32_t)sfile->queryProperties().getPropInt("@recordSize",-1); if (srs!=17) ERROR1("Superfile does not match subfile row size %d",srs); piter.setown(sfile->getIterator()); ForEach(*piter) { RemoteFilename rfn; StringBuffer s; piter->query().getFilename(rfn,0); rfn.getRemotePath(s); piter->query().getFilename(rfn,1); rfn.getRemotePath(s); crctot += crc32(s.str(),s.length(),0); np++; totrows += (unsigned)(piter->query().getFileSize(false,false)/srs); } piter.clear(); printf("Superfile iterate done - %d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot); if (crctot!=savcrc) ERROR("SuperFile does not match sub files"); unsigned tr = (unsigned)(sfile->getFileSize(false,false)/srs); if (totrows!=tr) ERROR1("Superfile size does not match part sum %d",tr); sfile->detach(); sfile.clear(); sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1")); if (sfile) ERROR("Superfile deletion failed"); t = 37; for (i=0;i<100;i++) { s.clear().append("daregress::test").append(t); if (i%1) { Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str()); if (!dfile) ERROR1("Could not find %s",s.str()); dfile->detach(); } else queryDistributedFileDirectory().removeEntry(s.str()); t = (t+37)%100; } printf("DFile removal complete\n"); t = 39; for (i=0;i<100;i++) { if (queryDistributedFileDirectory().exists(s.str())) ERROR1("Found %s after deletion",s.str()); Owned<IDistributedFile> dfile = 
queryDistributedFileDirectory().lookup(s.str()); if (dfile) ERROR1("Found %s after deletion",s.str()); t = (t+39)%100; } printf("DFile removal check complete\n"); queryNamedGroupStore().remove("daregress_group"); if (queryNamedGroupStore().lookup("daregress_group")) ERROR("Named group not removed"); }
// Register this slave with the Thor master.
// Receives the registration handshake on MPTAG_THORREGISTRATION, validates
// the master's Thor version and build tag, deserializes the cluster group
// and global configuration, confirms registration, then waits for the
// master's "all registered" message.  Returns false on any failure.
// NB: the message fields must be read in exactly the order the master wrote them.
static bool RegisterSelf(SocketEndpoint &masterEp)
{
    StringBuffer slfStr;
    StringBuffer masterStr;
    LOG(MCdebugProgress, thorJob, "registering %s - master %s",slfEp.getUrlStr(slfStr).str(),masterEp.getUrlStr(masterStr).str());
    try
    {
        SocketEndpoint ep = masterEp;
        ep.port = getFixedPort(getMasterPortBase(), TPORT_mp);  // talk to the master's MP port, not the port passed in
        Owned<INode> masterNode = createINode(ep);
        CMessageBuffer msg;
        if (!queryWorldCommunicator().recv(msg, masterNode, MPTAG_THORREGISTRATION))
            return false;
        PROGLOG("Initialization received");
        unsigned vmajor, vminor;
        msg.read(vmajor);
        msg.read(vminor);
        if (vmajor != THOR_VERSION_MAJOR || vminor != THOR_VERSION_MINOR)
        {
            replyError(TE_FailedToRegisterSlave, "Thor master/slave version mismatch");
            return false;
        }
        Owned<IGroup> rawGroup = deserializeIGroup(msg);
        globals->Release();                 // replace the bootstrap config with the master's
        globals = createPTree(msg);
        mergeCmdParams(globals); // cmd line
        // Topology settings driving the per-node slave/channel/port layout.
        unsigned slavesPerNode = globals->getPropInt("@slavesPerNode", 1);
        unsigned channelsPerSlave = globals->getPropInt("@channelsPerSlave", 1);
        unsigned localThorPortInc = globals->getPropInt("@localThorPortInc", DEFAULT_SLAVEPORTINC);
        unsigned slaveBasePort = globals->getPropInt("@slaveport", DEFAULT_THORSLAVEPORT);
        setClusterGroup(masterNode, rawGroup, slavesPerNode, channelsPerSlave, slaveBasePort, localThorPortInc);
        // Strand settings: use the master's values if present, otherwise fall
        // back to (and record) the local defaults.
        unsigned numStrands, blockSize;
        if (globals->hasProp("Debug/@forceNumStrands"))
            numStrands = globals->getPropInt("Debug/@forceNumStrands");
        else
        {
            numStrands = defaultForceNumStrands;
            globals->setPropInt("Debug/@forceNumStrands", defaultForceNumStrands);
        }
        if (globals->hasProp("Debug/@strandBlockSize"))
            blockSize = globals->getPropInt("Debug/@strandBlockSize");
        else
        {
            blockSize = defaultStrandBlockSize;
            globals->setPropInt("Debug/@strandBlockSize", defaultStrandBlockSize);
        }
        PROGLOG("Strand defaults: numStrands=%u, blockSize=%u", numStrands, blockSize);
        const char *_masterBuildTag = globals->queryProp("@masterBuildTag");
        const char *masterBuildTag = _masterBuildTag?_masterBuildTag:"no build tag";
        PROGLOG("Master build: %s", masterBuildTag);
        if (!_masterBuildTag || 0 != strcmp(BUILD_TAG, _masterBuildTag))
        {
            StringBuffer errStr("Thor master/slave build mismatch, master = ");
            errStr.append(masterBuildTag).append(", slave = ").append(BUILD_TAG);
            ERRLOG("%s", errStr.str());
#ifndef _DEBUG
            // Release builds refuse to register; debug builds only log the mismatch.
            replyError(TE_FailedToRegisterSlave, errStr.str());
            return false;
#endif
        }
        msg.read((unsigned &)masterSlaveMpTag);
        // Confirm registration back to the master.
        msg.clear();
        msg.setReplyTag(MPTAG_THORREGISTRATION);
        if (!queryNodeComm().reply(msg))
            return false;
        PROGLOG("Registration confirmation sent");
        if (!queryNodeComm().recv(msg, 0, MPTAG_THORREGISTRATION)) // when all registered
            return false;
        ::masterNode = LINK(masterNode);    // publish the master node globally (link - Owned still holds its ref)
        PROGLOG("verifying mp connection to rest of cluster");
        if (!queryNodeComm().verifyAll())
            ERRLOG("Failed to connect to all nodes");
        else
            PROGLOG("verified mp connection to rest of cluster");
        LOG(MCdebugProgress, thorJob, "registered %s",slfStr.str());
    }
    catch (IException *e)
    {
        FLLOG(MCexception(e), thorJob, e,"slave registration error");
        e->Release();
        return false;
    }
    return true;
}
// Register this slave with the Thor master (older protocol variant).
// Receives the handshake on MPTAG_THORREGISTRATION, validates the Thor
// version, deserializes and installs the cluster group, checks this node's
// rank within it, swaps in the master's configuration, optionally checks the
// build tag, confirms registration, then waits for the "all registered"
// message.  Returns false on any failure.
static bool RegisterSelf(SocketEndpoint &masterEp)
{
    StringBuffer slfStr;
    StringBuffer masterStr;
    LOG(MCdebugProgress, thorJob, "registering %s - master %s",slfEp.getUrlStr(slfStr).toCharArray(),masterEp.getUrlStr(masterStr).toCharArray());
    try
    {
        SocketEndpoint ep = masterEp;
        ep.port = getFixedPort(getMasterPortBase(), TPORT_mp);  // talk to the master's MP port
        Owned<INode> masterNode = createINode(ep);
        CMessageBuffer msg;
        if (!queryWorldCommunicator().recv(msg, masterNode, MPTAG_THORREGISTRATION))
            return false;
        PROGLOG("Initialization received");
        unsigned vmajor, vminor;
        msg.read(vmajor);
        msg.read(vminor);
        if (vmajor != THOR_VERSION_MAJOR || vminor != THOR_VERSION_MINOR)
        {
            replyError("Thor master/slave version mismatch");
            return false;
        }
        Owned<IGroup> group = deserializeIGroup(msg);
        setClusterGroup(group);
        SocketEndpoint myEp = queryMyNode()->endpoint();    // NOTE(review): myEp appears unused here
        rank_t groupPos = group->rank(queryMyNode());
        if (RANK_NULL == groupPos)
        {
            replyError("Node not part of thorgroup");
            return false;
        }
        // If a slave number was given on the command line it must agree with
        // this node's rank inside the deserialized group.
        if (globals->hasProp("@SLAVENUM") && (mySlaveNum != (unsigned)groupPos))
        {
            VStringBuffer errStr("Slave group rank[%d] does not match provided cmd line slaveNum[%d]", mySlaveNum, (unsigned)groupPos);
            replyError(errStr.str());
            return false;
        }
        globals->Release();     // replace the bootstrap config with the master's
        globals = createPTree(msg);
        mergeCmdParams(globals); // cmd line
        const char *_masterBuildTag = globals->queryProp("@masterBuildTag");
        const char *masterBuildTag = _masterBuildTag?_masterBuildTag:"no build tag";
        PROGLOG("Master build: %s", masterBuildTag);
#ifndef _DEBUG
        // Release builds refuse to register on a build-tag mismatch.
        if (!_masterBuildTag || 0 != strcmp(BUILD_TAG, _masterBuildTag))
        {
            StringBuffer errStr("Thor master/slave build mismatch, master = ");
            replyError(errStr.append(masterBuildTag).append(", slave = ").append(BUILD_TAG).str());
            return false;
        }
#endif
        msg.read((unsigned &)masterSlaveMpTag);
        // Confirm registration back to the master.
        msg.clear();
        msg.setReplyTag(MPTAG_THORREGISTRATION);
        if (!queryClusterComm().reply(msg))
            return false;
        PROGLOG("Registration confirmation sent");
        if (!queryClusterComm().recv(msg, 0, MPTAG_THORREGISTRATION)) // when all registered
            return false;
        PROGLOG("verifying mp connection to rest of cluster");
        if (!queryClusterComm().verifyAll())
            ERRLOG("Failed to connect to all nodes");
        else
            PROGLOG("verified mp connection to rest of cluster");
        ::masterNode = LINK(masterNode);    // publish the master node globally (link - Owned still holds its ref)
        LOG(MCdebugProgress, thorJob, "registered %s",slfStr.toCharArray());
    }
    catch (IException *e)
    {
        FLLOG(MCexception(e), thorJob, e,"slave registration error");
        e->Release();
        return false;
    }
    return true;
}
// Diagnostic request handler: services MDR_GET_VALUE queries, keyed by an
// id string, and replies with the requested diagnostic text/values on the
// same buffer.  Any unhandled function code still gets an (empty) reply via
// the trailing coven.reply.
void processMessage(CMessageBuffer &mb)
{
    ICoven &coven=queryCoven();
    MemoryBuffer params;
    params.swapWith(mb);    // keep the request in 'params'; reuse 'mb' to build the reply
    int fn;
    params.read(fn);
    switch (fn) {
    case MDR_GET_VALUE:
        {
            StringAttr id;
            StringBuffer buf;
            params.read(id);
            if (0 == stricmp(id,"threads")) {
                mb.append(getThreadList(buf).str());
            }
            else if (0 == stricmp(id, "mpqueue")) {
                mb.append(getReceiveQueueDetails(buf).str());
            }
            else if (0 == stricmp(id, "locks")) {
                mb.append(querySDS().getLocks(buf).str());
            }
            else if (0 == stricmp(id, "sdsstats")) {
                mb.append(querySDS().getUsageStats(buf).str());
            }
            else if (0 == stricmp(id, "connections")) {
                mb.append(querySDS().getConnections(buf).str());
            }
            else if (0 == stricmp(id, "sdssubscribers")) {
                mb.append(querySDS().getSubscribers(buf).str());
            }
            else if (0 == stricmp(id, "clients")) {
                mb.append(querySessionManager().getClientProcessList(buf).str());
            }
            else if (0 == stricmp(id, "subscriptions")) {
                mb.append(getSubscriptionList(buf).str());
            }
            else if (0 == stricmp(id, "mpverify")) {
                queryWorldCommunicator().verifyAll(buf);
                mb.append(buf.str());
            }
            else if (0 == stricmp(id, "extconsistency")) {
                mb.append(querySDS().getExternalReport(buf).str());
            }
            else if (0 == stricmp(id, "build")) {
                mb.append("$Id: dadiags.cpp 62376 2011-02-04 21:59:58Z sort $");
            }
            else if (0 == stricmp(id, "sdsfetch")) {
                // Serialize the SDS subtree at the requested branch path.
                StringAttr branchpath;
                params.read(branchpath);
                Linked<IPropertyTree> sroot = querySDSServer().lockStoreRead();
                try {
                    sroot->queryPropTree(branchpath)->serialize(mb);
                }
                catch (...) {
                    // Always release the store read lock, even on failure.
                    querySDSServer().unlockStoreRead();
                    throw;
                }
                querySDSServer().unlockStoreRead();
            }
            else if (0 == stricmp(id, "perf")) {
                getSystemTraceInfo(buf,PerfMonStandard);
                mb.append(buf.str());
            }
            else if (0 == stricmp(id, "sdssize")) {
                // Measure (but don't return) the XML size of the subtree;
                // only the length is appended to the reply.
                StringAttr branchpath;
                params.read(branchpath);
                Linked<IPropertyTree> sroot = querySDSServer().lockStoreRead();
                StringBuffer sbuf;
                try {
                    toXML(sroot->queryPropTree(branchpath),sbuf);
                    DBGLOG("sdssize '%s' = %d",branchpath.get(),sbuf.length());
                }
                catch (...) {
                    // Always release the store read lock, even on failure.
                    querySDSServer().unlockStoreRead();
                    throw;
                }
                querySDSServer().unlockStoreRead();
                mb.append(sbuf.length());
            }
            else if (0 == stricmp(id, "disconnect")) {
                StringAttr client;
                params.read(client);
                SocketEndpoint ep(client);
                PROGLOG("Dalidiag request to close client connection: %s", client.get());
                Owned<INode> node = createINode(ep);
                queryCoven().disconnect(node);
            }
            else if (0 == stricmp(id, "unlock")) {
                __int64 connectionId;
                bool disconnect;
                params.read(connectionId);
                params.read(disconnect);
                PROGLOG("Dalidiag request to unlock connection id: %" I64F "x", connectionId);
                StringBuffer connectionInfo;
                bool success = querySDSServer().unlock(connectionId, disconnect, connectionInfo);
                mb.append(success);
                if (success)
                    mb.append(connectionInfo);  // details only present on success
            }
            else if (0 == stricmp(id, "save")) {
                PROGLOG("Dalidiag requests SDS save");
                querySDSServer().saveRequest();
            }
            else if (0 == stricmp(id, "settracetransactions")) {
                PROGLOG("Dalidiag requests Trace Transactions");
                // traceAllTransactions returns the previous setting
                if(traceAllTransactions(true))
                    mb.append("OK - no change");
                else
                    mb.append("OK - transaction tracing enabled");
            }
            else if (0 == stricmp(id, "cleartracetransactions")) {
                PROGLOG("Dalidiag requests Trace Transactions stopped");
                if(traceAllTransactions(false))
                    mb.append("OK - transaction tracing disabled");
                else
                    mb.append("OK - no change");
            }
            else if (0 == stricmp(id, "setldapflags")) {
                unsigned f;
                params.read(f);
                PROGLOG("Dalidiag requests setldapflags %d",f);
                querySessionManager().setLDAPflags(f);
            }
            else if (0 == stricmp(id, "getldapflags")) {
                unsigned f=querySessionManager().getLDAPflags();;
                mb.append(f);
            }
            else if (0 == stricmp(id, "setsdsdebug")) {
                PROGLOG("Dalidiag setsdsdebug");
                // Variable-length argument list: count followed by strings.
                unsigned p;
                params.read(p);
                StringArray arr;
                while (p--) {
                    StringAttr s;
                    params.read(s);
                    arr.append(s);
                }
                StringBuffer reply;
                bool success = querySDSServer().setSDSDebug(arr, reply);
                mb.append(success).append(reply);
            }
            else
                mb.append(StringBuffer("UNKNOWN OPTION: ").append(id).str());
        }
        break;
    }
    coven.reply(mb);
}
CNodeSender(unsigned _port, SocketEndpoint const & ep) : port(_port), dest(createINode(ep)) {}