bool WuResubmit(const char *wuid)
{
    Owned<IWorkUnitFactory> factory = getWorkUnitFactory();
    Owned<IWorkUnit> wu = factory->updateWorkUnit(wuid);
    if (!wu)
    {
        ERRLOG("WuResubmit(%s): could not find workunit",wuid);
        return false;
    }
    if (wu->getState()!=WUStateFailed)
    {
        ERRLOG("WuResubmit(%s): could not resubmit as workunit state is '%s'", wuid, wu->queryStateDesc());
        return false;
    }
    SCMStringBuffer token;
    wu->getSecurityToken(token);
    SCMStringBuffer user;
    SCMStringBuffer password;
    extractToken(token.str(), wuid, user, password);
    wu->resetWorkflow();
    wu->setState(WUStateSubmitted);
    wu->commit();
    wu.clear();
    submitWorkUnit(wuid,user.str(),password.str());
    PROGLOG("WuResubmit(%s): resubmitted",wuid);
    return true;
}
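// Hedged usage sketch (hypothetical helper, not in the original source):
// WuResubmit() above refuses anything not in WUStateFailed, so a batch retry
// can simply call it per wuid and count the successes.
static unsigned resubmitFailedWorkunits(const StringArray &wuids)
{
    unsigned resubmitted = 0;
    ForEachItemIn(i, wuids)
    {
        if (WuResubmit(wuids.item(i))) // false => not found or not in failed state
            resubmitted++;
    }
    return resubmitted;
}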
virtual void write()
{
    StringBuffer rowTag;
    OwnedRoxieString xmlpath(helper->getXmlIteratorPath());
    if (!xmlpath)
        rowTag.append(DEFAULTXMLROWTAG);
    else
    {
        const char *path = xmlpath;
        if (*path == '/') path++;
        if (strchr(path, '/')) UNIMPLEMENTED;
        rowTag.append(path);
    }
    StringBuffer out;
    if (!dlfn.isExternal() || firstNode()) // if external, 1 header,footer
    {
        OwnedRoxieString suppliedHeader(helper->getHeader());
        if (kind==TAKjsonwrite)
            buildJsonHeader(out, suppliedHeader, rowTag);
        else if (suppliedHeader)
            out.set(suppliedHeader);
        else
            out.set(DEFAULTXMLHEADER).newline();
        outraw->write(out.length(), out.str());
        if (calcFileCrc)
            fileCRC.tally(out.length(), out.str());
    }
    Owned<IXmlWriterExt> writer = createIXmlWriterExt(helper->getXmlFlags(), 0, NULL, (kind==TAKjsonwrite) ? WTJSON : WTStandard);
    writer->outputBeginArray(rowTag); // need this to format rows, even if not outputting it below
    while (!abortSoon)
    {
        OwnedConstThorRow row = input->ungroupedNextRow();
        if (!row)
            break;
        writer->clear().outputBeginNested(rowTag, false);
        helper->toXML((const byte *)row.get(), *writer);
        writer->outputEndNested(rowTag);
        outraw->write(writer->length(), writer->str());
        if (calcFileCrc)
            fileCRC.tally(writer->length(), writer->str());
        processed++;
    }
    if (!dlfn.isExternal() || lastNode()) // if external, 1 header,footer
    {
        OwnedRoxieString suppliedFooter(helper->getFooter());
        if (kind==TAKjsonwrite)
            buildJsonFooter(out.clear().newline(), suppliedFooter, rowTag);
        else if (suppliedFooter)
            out.set(suppliedFooter);
        else
            out.set(DEFAULTXMLFOOTER).newline();
        outraw->write(out.length(), out.str());
        if (calcFileCrc)
            fileCRC.tally(out.length(), out.str());
    }
}
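// For reference (illustrative shape derived from write() above; the Dataset
// wrapper shown is an assumption about what the default header/footer expand
// to): the XML variant emits
//   <Dataset>           header (DEFAULTXMLHEADER or the supplied header)
//    <Row>...</Row>     one element per input row, produced via helper->toXML()
//   </Dataset>          footer (DEFAULTXMLFOOTER or the supplied footer)
// For TAKjsonwrite the same header/rows/footer sequence is produced via
// buildJsonHeader/buildJsonFooter with the rows formatted as a JSON array.
// On an external (single) file only the first node writes the header and only
// the last writes the footer, per the firstNode()/lastNode() guards.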
static void test1()
{
    printf("Test SDS read/write\n");
    Owned<IPropertyTree> ref = createPTree("DAREGRESS");
    fn(1,2,3,0,ref);
    StringBuffer refstr;
    toXML(ref,refstr,0,XML_SortTags|XML_Format);
    printf("Created reference size %d\n",refstr.length());
    Owned<IRemoteConnection> conn = querySDS().connect("/DAREGRESS",myProcessSession(), RTM_CREATE, 1000000);
    Rconn = conn;
    IPropertyTree *root = conn->queryRoot();
    fn(1,2,3,0,root);
    conn.clear();
    printf("Created test branch 1\n");
    conn.setown(querySDS().connect("/DAREGRESS",myProcessSession(), RTM_DELETE_ON_DISCONNECT, 1000000));
    root = conn->queryRoot();
    StringBuffer s;
    toXML(root,s,0,XML_SortTags|XML_Format);
    if (strcmp(s.str(),refstr.str())!=0)
        ERROR("Branch 1 does not match");
    else
        printf("Branch 1 matches\n");
    conn.clear();
    conn.setown(querySDS().connect("/DAREGRESS",myProcessSession(), 0, 1000000));
    if (conn)
        ERROR("RTM_DELETE_ON_DISCONNECT failed");
    Rconn = querySDS().connect("/DAREGRESS",myProcessSession(), RTM_CREATE, 1000000);
    StringBuffer pn;
    fn2(1,2,3,0,pn);
    ::Release(Rconn);
    printf("Created test branch 2\n");
    Rconn = NULL;
    conn.setown(querySDS().connect("/DAREGRESS",myProcessSession(), RTM_DELETE_ON_DISCONNECT, 1000000));
    root = conn->queryRoot();
    toXML(root,s.clear(),0,XML_SortTags|XML_Format);
    if (strcmp(s.str(),refstr.str())!=0)
        ERROR("Branch 2 does not match");
    else
        printf("Branch 2 matches\n");
    conn.clear();
    conn.setown(querySDS().connect("/DAREGRESS",myProcessSession(), 0, 1000000));
    if (conn)
        ERROR("RTM_DELETE_ON_DISCONNECT failed");
}
void stop()
{
    if (global)
        putNext(NULL);
    stopInput(input);
    input.clear();
    dataLinkStop();
}
virtual void stop() override
{
    if (!isLocal)
    {
        barrier->wait(false);
        sorter->stopMerge();
    }
    {
        CriticalBlock b(joinHelperCrit);
        joinhelper.clear();
    }
    if (strm)
    {
        strm->stop();
        strm.clear();
    }
    PARENT::stop();
}
void stop()
{
    if (!stopped)
    {
        abortSoon = true;
        stopped = true;
        doStop();
    }
    input.clear();
}
virtual void stop()
{
    if (input)
    {
        stopInput(input);
        input = NULL;
    }
    if (!isLocal)
    {
        barrier->wait(false);
        sorter->stopMerge();
    }
    {
        CriticalBlock b(joinHelperCrit);
        joinhelper.clear();
    }
    strm->stop();
    strm.clear();
    dataLinkStop();
}
offset_t write(IRowStream *input)
{
    StringBuffer tempname;
    GetTempName(tempname,"srtmrg",false);
    dataFile.setown(createIFile(tempname.str()));
    Owned<IExtRowWriter> output = createRowWriter(dataFile, rowIf);
    bool overflowed = false;
    ActPrintLog(&activity, "Local Overflow Merge start");
    unsigned ret=0;
    loop
    {
        const void *_row = input->nextRow();
        if (!_row)
            break;
        ret++;
        OwnedConstThorRow row = _row;
        offset_t start = output->getPosition();
        output->putRow(row.getLink());
        idx++;
        if (idx==interval)
        {
            idx = 0;
            if (!sampleRows.append(row.getClear()))
            {
                // JCSMORE used to check if 'isFull()' here, but only to warn
                // I think this is bad news, if has run out of room here...
                // should at least warn in workunit I suspect
                overflowsize = output->getPosition();
                if (!overflowed)
                {
                    WARNLOG("Sample buffer full");
                    overflowed = true;
                }
            }
        }
        writeidxofs(start);
    }
    output->flush();
    offset_t end = output->getPosition();
    output.clear();
    writeidxofs(end);
    if (idxFileIO)
    {
        idxFileStream->flush();
        idxFileStream.clear();
        idxFileIO.clear();
    }
    if (overflowed)
        WARNLOG("Overflowed by %"I64F"d", overflowsize);
    ActPrintLog(&activity, "Local Overflow Merge done: overflow file '%s', size = %"I64F"d", dataFile->queryFilename(), dataFile->size());
    return end;
}
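// Note on write() above (descriptive summary, not additional source): every
// 'interval'-th row is also linked into sampleRows while the whole stream is
// spilled to the temp file; the retained sample presumably feeds the later
// merge partitioning. If sampleRows.append() fails, the code records where
// the overflow began (overflowsize) and warns once rather than aborting.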
void kill()
{
    ActPrintLog("MSortSlaveActivity::kill");
    {
        CriticalBlock block(statsCs);
        mergeStats(spillStats, sorter);
        sorter.clear();
    }
    CSlaveActivity::kill();
}
bool poll()
{
    if (stopped||!running())
    {
        PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Stopping");
        if (server)
        {
            server->stop();
            server.clear();
        }
        return false;
    }
    return true;
}
void testMultiCluster()
{
    Owned<IGroup> grp1 = createIGroup("192.168.51.1-5");
    Owned<IGroup> grp2 = createIGroup("192.168.16.1-5");
    Owned<IGroup> grp3 = createIGroup("192.168.53.1-5");
    queryNamedGroupStore().add("testgrp1",grp1);
    queryNamedGroupStore().add("testgrp2",grp2);
    queryNamedGroupStore().add("testgrp3",grp3);
    Owned<IFileDescriptor> fdesc = createFileDescriptor();
    fdesc->setDefaultDir("/c$/thordata/test");
    fdesc->setPartMask("testfile1._$P$_of_$N$");
    fdesc->setNumParts(5);
    ClusterPartDiskMapSpec mapping;
    fdesc->addCluster(grp1,mapping);
    fdesc->addCluster(grp2,mapping);
    fdesc->addCluster(grp3,mapping);
    queryDistributedFileDirectory().removeEntry("test::testfile1",UNKNOWN_USER);
    Owned<IDistributedFile> file = queryDistributedFileDirectory().createNew(fdesc);
    queryDistributedFileDirectory().removeEntry("test::testfile1",UNKNOWN_USER);
    file->attach("test::testfile1",UNKNOWN_USER);
    StringBuffer name;
    unsigned i;
    for (i=0;i<file->numClusters();i++)
        PROGLOG("cluster[%d] = %s",i,file->getClusterName(i,name.clear()).str());
    file.clear();
    file.setown(queryDistributedFileDirectory().lookup("test::testfile1",UNKNOWN_USER));
    for (i=0;i<file->numClusters();i++)
        PROGLOG("cluster[%d] = %s",i,file->getClusterName(i,name.clear()).str());
    file.clear();
    file.setown(queryDistributedFileDirectory().lookup("test::testfile1@testgrp1",UNKNOWN_USER));
    for (i=0;i<file->numClusters();i++)
        PROGLOG("cluster[%d] = %s",i,file->getClusterName(i,name.clear()).str());
    file.clear();
    queryDistributedFileDirectory().removePhysical("test::testfile1@testgrp2",UNKNOWN_USER);
    file.setown(queryDistributedFileDirectory().lookup("test::testfile1",UNKNOWN_USER));
    for (i=0;i<file->numClusters();i++)
        PROGLOG("cluster[%d] = %s",i,file->getClusterName(i,name.clear()).str());
}
void stop()
{
    try
    {
        if (server)
            server->stop();
    }
    catch (IException *e)
    {
        EXCLOG(e,"dfuplus(dafilesrvstop)");
        e->Release();
    }
    server.clear();
}
virtual void start() override
{
    ActivityTimer s(totalCycles, timeActivities);
    ActPrintLog(rolloverEnabled ? "GROUP: is global" : "GROUP: is local");
    PARENT::start();
    eogNext = prevEog = eof = false;
    if (rolloverEnabled)
    {
        useRollover = !lastNode();
#ifdef _TESTING
        ActPrintLog("Node number = %d, Total Nodes = %d", queryJobChannel().queryMyRank(), container.queryJob().querySlaves());
#endif
    }
    stream.set(inputStream);
    startLastGroup = getDataLinkGlobalCount();
    next.setown(getNext());
    if (rolloverEnabled && !firstNode()) // 1st node can have nothing to send
    {
        Owned<IThorRowCollector> collector = createThorRowCollector(*this, this, NULL, stableSort_none, rc_mixed, SPILL_PRIORITY_SPILLABLE_STREAM);
        Owned<IRowWriter> writer = collector->getWriter();
        if (next)
        {
            ActPrintLog("GROUP: Sending first group to previous node(%d)", queryJobChannel().queryMyRank()-1);
            for (;;)
            {
                writer->putRow(next.getLink());
                if (abortSoon)
                    break; // always send group even when aborting
                OwnedConstThorRow next2 = getNext();
                if (!next2)
                {
                    eof = true;
                    break;
                }
                else if (!helper->isSameGroup(next2, next))
                {
                    next.setown(next2.getClear());
                    break;
                }
                next.setown(next2.getClear());
            }
        }
        writer.clear();
        ActPrintLog("GROUP: %" RCPF "d records to send", collector->numRows());
        Owned<IRowStream> strm = collector->getStream();
        rowServer.setown(createRowServer(this, strm, queryJobChannel().queryJobComm(), mpTag));
    }
}
static IConstWorkUnit * getWorkunit(ICodeContext * ctx, const char * wuid)
{
    StringBuffer _wuid(wuid);
    if (!_wuid.length())
        return NULL;
    wuid = _wuid.toUpperCase().str();
    Owned<IWorkUnitFactory> wuFactory = getWorkunitFactory(ctx);
    Owned<IConstWorkUnit> wu = wuFactory->openWorkUnit(wuid);
    if (wu)
    {
        if (!checkScopeAuthorized(ctx->queryUserDescriptor(), wu->queryWuScope()))
            wu.clear();
    }
    return wu.getClear();
}
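// Hedged usage sketch (hypothetical helper, not in the original source):
// getWorkunit() above returns NULL both for an unknown/empty wuid and when
// checkScopeAuthorized() rejects the caller, so callers must treat NULL as
// "not visible" rather than "does not exist".
static bool workunitIsVisible(ICodeContext *ctx, const char *wuid)
{
    Owned<IConstWorkUnit> wu = getWorkunit(ctx, wuid);
    return NULL != wu.get();
}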
void stop()
{
    if (output)
    {
        output->stop();
        output.clear();
    }
    ActPrintLog("SORT waiting barrier.2");
    barrier->wait(false);
    ActPrintLog("SORT barrier.2 raised");
    if (input)
        stopInput(input);
    sorter->stopMerge();
    ActPrintLog("SORT waiting for merge");
    dataLinkStop();
}
virtual void stop() override
{
    if (output)
    {
        output->stop();
        output.clear();
    }
    if (hasStarted())
    {
        ActPrintLog("SORT waiting barrier.2");
        barrier->wait(false);
        ActPrintLog("SORT barrier.2 raised");
        ActPrintLog("SORT waiting for merge");
        sorter->stopMerge();
    }
    PARENT::stop();
}
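// Shared shutdown order for the global-sort stop() variants above (descriptive
// summary): release the output stream first, then rendezvous on the inter-node
// barrier (wait(false) is a non-aborting wait), then sorter->stopMerge() so
// remote merge readers are unblocked before the activity tears down. The
// override immediately above additionally guards the barrier with
// hasStarted(), so an activity that never started does not wait on it.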
int main(int argc, const char *argv[])
{
    InitModuleObjects();
    EnableSEHtoExceptionMapping();
    NoQuickEditSection xxx;
    Owned<IFile> file = createIFile("dfuserver.xml");
    if (file->exists())
        globals.setown(createPTreeFromXMLFile("dfuserver.xml", ipt_caseInsensitive));
    else
        globals.setown(readOldIni());
    for (unsigned i=1;i<(unsigned)argc;i++)
    {
        const char *arg = argv[i];
        StringBuffer prop("@");
        StringBuffer val;
        while (*arg && *arg != '=')
            prop.append(*arg++);
        if (*arg)
        {
            arg++;
            while (isspace(*arg))
                arg++;
            val.append(arg);
            prop.clip();
            val.clip();
            if (prop.length()>1)
                globals->setProp(prop.str(), val.str());
        }
    }
    StringBuffer daliServer;
    StringBuffer queue;
    if (!globals->getProp("@DALISERVERS", daliServer)||!globals->getProp("@QUEUE", queue))
    {
        usage();
        globals.clear();
        releaseAtoms();
        return 1;
    }
    Owned<IFile> sentinelFile;
    bool stop = globals->getPropInt("@STOP",0)!=0;
    if (!stop)
    {
        sentinelFile.setown(createSentinelTarget());
        removeSentinelFile(sentinelFile);
        StringBuffer logname;
        StringBuffer logdir;
        if (!getConfigurationDirectory(globals->queryPropTree("Directories"),"log","dfuserver",globals->queryProp("@name"),logdir))
            globals->getProp("@LOG_DIR", logdir);
        if (logdir.length() && recursiveCreateDirectory(logdir.str()))
            logname.append(logdir);
        else
            appendCurrentDirectory(logname, true);
        if (logname.length() && logname.charAt(logname.length()-1) != PATHSEPCHAR)
            logname.append(PATHSEPCHAR);
        logname.append("dfuserver");
        StringBuffer aliasLogName(logname);
        aliasLogName.append(".log");
        fileMsgHandler = getRollingFileLogMsgHandler(logname.str(), ".log", MSGFIELD_STANDARD, false, true, NULL, aliasLogName.str());
        queryLogMsgManager()->addMonitorOwn(fileMsgHandler, getCategoryLogMsgFilter(MSGAUD_all, MSGCLS_all, 1000));
    }
    StringBuffer ftslogdir;
    if (getConfigurationDirectory(globals->queryPropTree("Directories"),"log","ftslave",globals->queryProp("@name"),ftslogdir)) // NB instance deliberately dfuserver's
        setFtSlaveLogDir(ftslogdir.str());
    setRemoteSpawnSSH(
        globals->queryProp("SSH/@SSHidentityfile"),
        globals->queryProp("SSH/@SSHusername"),
        globals->queryProp("SSH/@SSHpassword"),
        globals->getPropInt("SSH/@SSHtimeout",0),
        globals->getPropInt("SSH/@SSHretries",3),
        "run_");
    bool enableSNMP = globals->getPropInt("@enableSNMP")!=0;
    CSDSServerStatus *serverstatus=NULL;
    Owned<IReplicateServer> replserver;
    try
    {
        Owned<IGroup> serverGroup = createIGroup(daliServer.str(),DALI_SERVER_PORT);
        initClientProcess(serverGroup, DCR_DfuServer, 0, NULL, NULL, stop?(1000*30):MP_WAIT_FOREVER);
        setPasswordsFromSDS();
        if (!stop)
        {
            if (globals->getPropBool("@enableSysLog",true))
                UseSysLogForOperatorMessages();
            serverstatus = new CSDSServerStatus("DFUserver");
            setDaliServixSocketCaching(true); // speeds up linux operations
            startLogMsgParentReceiver();      // for auditing
            connectLogMsgManagerToDali();
            engine.setown(createDFUengine());
            addAbortHandler(exitDFUserver);
        }
        const char *q = queue.str();
        loop
        {
            StringBuffer subq;
            const char *comma = strchr(q,',');
            if (comma)
                subq.append(comma-q,q);
            else
                subq.append(q);
            if (stop)
                stopDFUserver(subq.str());
            else
            {
                StringBuffer mask;
                mask.appendf("Queue[@name=\"%s\"][1]",subq.str());
                IPropertyTree *t=serverstatus->queryProperties()->queryPropTree(mask.str());
                if (t)
                    t->setPropInt("@num",t->getPropInt("@num",0)+1);
                else
                {
                    t = createPTree();
                    t->setProp("@name",subq.str());
                    t->setPropInt("@num",1);
                    serverstatus->queryProperties()->addPropTree("Queue",t);
                }
                serverstatus->commitProperties();
                engine->setDefaultTransferBufferSize((size32_t)globals->getPropInt("@transferBufferSize"));
                engine->startListener(subq.str(),serverstatus);
            }
            if (!comma)
                break;
            q = comma+1;
            if (!*q)
                break;
        }
        q = globals->queryProp("@MONITORQUEUE");
        if (q&&*q)
        {
            if (stop)
                stopDFUserver(q);
            else
            {
                IPropertyTree *t=serverstatus->queryProperties()->addPropTree("MonitorQueue",createPTree());
                t->setProp("@name",q);
                engine->startMonitor(q,serverstatus,globals->getPropInt("@MONITORINTERVAL",60)*1000);
            }
        }
        q = globals->queryProp("@REPLICATEQUEUE");
        if (q&&*q)
        {
            if (stop)
            {
                // TBD?
            }
            else
            {
                replserver.setown(createReplicateServer(q));
                replserver->runServer();
            }
        }
        if (!stop)
        {
            serverstatus->commitProperties();
            writeSentinelFile(sentinelFile);
            engine->joinListeners();
            if (replserver.get())
                replserver->stopServer();
            LOG(MCprogress, unknownJob, "Exiting");
        }
    }
    catch (IException *e)
    {
        EXCLOG(e, "DFU Server Exception: ");
        e->Release();
    }
    catch (const char *s)
    {
        WARNLOG("DFU: %s",s);
    }
    delete serverstatus;
    if (stop)
        Sleep(2000); // give time to stop
    engine.clear();
    globals.clear();
    closeEnvironment();
    closedownClientProcess();
    UseSysLogForOperatorMessages(false);
    setDaliServixSocketCaching(false);
    releaseAtoms();
    return 0;
}
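// Example invocations (a sketch based on the NAME=value argument parsing in
// main() above; every argument of that form becomes the @NAME global property,
// and @DALISERVERS plus @QUEUE are required):
//   dfuserver DALISERVERS=mydali:7070 QUEUE=dfuserver_queue
//   dfuserver DALISERVERS=mydali:7070 QUEUE=dfuserver_queue STOP=1
// The second form sets @STOP, which makes the process stop the listed queues
// instead of starting listeners on them.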
static void test2()
{
    const size32_t recsize = 17;
    printf("Test DFS\n");
    StringBuffer s;
    unsigned i;
    unsigned n;
    unsigned t;
    queryNamedGroupStore().remove("daregress_group");
    queryDistributedFileDirectory().removeEntry("daregress::superfile1");
    SocketEndpointArray epa;
    for (n=0;n<400;n++)
    {
        s.clear().append("192.168.").append(n/256).append('.').append(n%256);
        SocketEndpoint ep(s.str());
        epa.append(ep);
    }
    Owned<IGroup> group = createIGroup(epa);
    queryNamedGroupStore().add("daregress_group",group,true);
    if (!queryNamedGroupStore().find(group,s.clear()))
        ERROR("Created logical group not found");
    if (stricmp(s.str(),"daregress_group")!=0)
        ERROR("Created logical group found with wrong name");
    group.setown(queryNamedGroupStore().lookup("daregress_group"));
    if (!group)
        ERROR("named group lookup failed");
    printf("Named group created - 400 nodes\n");
    for (i=0;i<100;i++)
    {
        Owned<IPropertyTree> pp = createPTree("Part");
        Owned<IFileDescriptor> fdesc = createFileDescriptor();
        fdesc->setDefaultDir("c:\\thordata\\regress");
        n = 9;
        for (unsigned k=0;k<400;k++)
        {
            s.clear().append("192.168.").append(n/256).append('.').append(n%256);
            Owned<INode> node = createINode(s.str());
            pp->setPropInt64("@size",(n*777+i)*recsize);
            s.clear().append("daregress_test").append(i).append("._").append(n+1).append("_of_400");
            fdesc->setPart(n,node,s.str(),pp);
            n = (n+9)%400;
        }
        fdesc->queryProperties().setPropInt("@recordSize",17);
        s.clear().append("daregress::test").append(i);
        queryDistributedFileDirectory().removeEntry(s.str());
        StringBuffer cname;
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().createNew(fdesc);
        if (stricmp(dfile->getClusterName(0,cname),"daregress_group")!=0)
            ERROR1("Cluster name wrong %d",i);
        s.clear().append("daregress::test").append(i);
        dfile->attach(s.str());
    }
    printf("DFile create done - 100 files\n");
    unsigned samples = 5;
    t = 33;
    for (i=0;i<100;i++)
    {
        s.clear().append("daregress::test").append(t);
        if (!queryDistributedFileDirectory().exists(s.str()))
            ERROR1("Could not find %s",s.str());
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
        if (!dfile)
        {
            ERROR1("Could not find %s",s.str());
            continue;
        }
        offset_t totsz = 0;
        n = 11;
        for (unsigned k=0;k<400;k++)
        {
            Owned<IDistributedFilePart> part = dfile->getPart(n);
            if (!part)
            {
                ERROR2("part not found %d %d",t,n);
                continue;
            }
            s.clear().append("192.168.").append(n/256).append('.').append(n%256);
            Owned<INode> node = createINode(s.str());
            if (!node->equals(part->queryNode()))
                ERROR2("part node mismatch %d, %d",t,n);
            if (part->getFileSize(false,false)!=(n*777+t)*recsize)
                ERROR4("size node mismatch %d, %d, %d, %d",t,n,(unsigned)part->getFileSize(false,false),(n*777+t)*recsize);
            s.clear().append("daregress_test").append(t).append("._").append(n+1).append("_of_400");
            /* ** TBD
            if (stricmp(s.str(),part->queryPartName())!=0)
                ERROR4("part name mismatch %d, %d '%s' '%s'",t,n,s.str(),part->queryPartName());
            */
            totsz += (n*777+t)*recsize;
            if ((samples>0)&&(i+n+t==k))
            {
                samples--;
                RemoteFilename rfn;
                part->getFilename(rfn,samples%2);
                StringBuffer fn;
                rfn.getRemotePath(fn);
                printf("SAMPLE: %d,%d %s\n",t,n,fn.str());
            }
            n = (n+11)%400;
        }
        if (totsz!=dfile->getFileSize(false,false))
            ERROR1("total size mismatch %d",t);
        t = (t+33)%100;
    }
    printf("DFile lookup done - 100 files\n");
    // check iteration
    __int64 crctot = 0;
    unsigned np = 0;
    unsigned totrows = 0;
    Owned<IDistributedFileIterator> fiter = queryDistributedFileDirectory().getIterator("daregress::*",false);
    Owned<IDistributedFilePartIterator> piter;
    ForEach(*fiter)
    {
        piter.setown(fiter->query().getIterator());
        ForEach(*piter)
        {
            RemoteFilename rfn;
            StringBuffer s;
            piter->query().getFilename(rfn,0);
            rfn.getRemotePath(s);
            piter->query().getFilename(rfn,1);
            rfn.getRemotePath(s);
            crctot += crc32(s.str(),s.length(),0);
            np++;
            totrows += (unsigned)(piter->query().getFileSize(false,false)/fiter->query().queryProperties().getPropInt("@recordSize",-1));
        }
    }
    piter.clear();
    fiter.clear();
    printf("DFile iterate done - %d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot);
    Owned<IDistributedSuperFile> sfile;
    sfile.setown(queryDistributedFileDirectory().createSuperFile("daregress::superfile1",true));
    for (i = 0;i<100;i++)
    {
        s.clear().append("daregress::test").append(i);
        sfile->addSubFile(s.str());
    }
    sfile.clear();
    sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1"));
    if (!sfile)
    {
        ERROR("Could not find added superfile");
        return;
    }
    __int64 savcrc = crctot;
    crctot = 0;
    np = 0;
    totrows = 0;
    size32_t srs = (size32_t)sfile->queryProperties().getPropInt("@recordSize",-1);
    if (srs!=17)
        ERROR1("Superfile does not match subfile row size %d",srs);
    piter.setown(sfile->getIterator());
    ForEach(*piter)
    {
        RemoteFilename rfn;
        StringBuffer s;
        piter->query().getFilename(rfn,0);
        rfn.getRemotePath(s);
        piter->query().getFilename(rfn,1);
        rfn.getRemotePath(s);
        crctot += crc32(s.str(),s.length(),0);
        np++;
        totrows += (unsigned)(piter->query().getFileSize(false,false)/srs);
    }
    piter.clear();
    printf("Superfile iterate done - %d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot);
    if (crctot!=savcrc)
        ERROR("SuperFile does not match sub files");
    unsigned tr = (unsigned)(sfile->getFileSize(false,false)/srs);
    if (totrows!=tr)
        ERROR1("Superfile size does not match part sum %d",tr);
    sfile->detach();
    sfile.clear();
    sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1"));
    if (sfile)
        ERROR("Superfile deletion failed");
    t = 37;
    for (i=0;i<100;i++)
    {
        s.clear().append("daregress::test").append(t);
        if (i%1)
        {
            Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
            if (!dfile)
                ERROR1("Could not find %s",s.str());
            dfile->detach();
        }
        else
            queryDistributedFileDirectory().removeEntry(s.str());
        t = (t+37)%100;
    }
    printf("DFile removal complete\n");
    t = 39;
    for (i=0;i<100;i++)
    {
        s.clear().append("daregress::test").append(t); // rebuild the name under test, mirroring the removal loop above
        if (queryDistributedFileDirectory().exists(s.str()))
            ERROR1("Found %s after deletion",s.str());
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
        if (dfile)
            ERROR1("Found %s after deletion",s.str());
        t = (t+39)%100;
    }
    printf("DFile removal check complete\n");
    queryNamedGroupStore().remove("daregress_group");
    if (queryNamedGroupStore().lookup("daregress_group"))
        ERROR("Named group not removed");
}
MODULE_EXIT()
{
    servers.kill(); // should already be clear when stopped
    serverConfig.clear();
    delete stopServerCrit;
}
bool CEclDirectEx::onRunEclEx(IEspContext &context, IEspRunEclExRequest &req, IEspRunEclExResponse &resp)
{
    if (!context.validateFeatureAccess(ECLDIRECT_ACCESS, SecAccess_Full, false))
        throw MakeStringException(-1, "EclDirect access permission denied.");
    const char* eclText = req.getEclText();
    if (!eclText || !*eclText)
    {
        resp.setResults("<Exception><Source>ESP</Source><Message>No Ecl Text provided</Message></Exception>");
        return true;
    }
    StringBuffer user;
    if (!context.getUserID(user).length())
        user.append(req.getUserName());
    Owned<IWorkUnitFactory> factory = getWorkUnitFactory(context.querySecManager(), context.queryUser());
    Owned<IWorkUnit> workunit;
    if (!user.length())
        workunit.setown(factory->createWorkUnit(NULL, "ECL-Direct", ""));
    else
    {
        workunit.setown(factory->createWorkUnit(NULL, "ECL-Direct", user.str()));
        workunit->setUser(user.str());
    }
    Owned<IWUQuery> query = workunit->updateQuery();
    query->setQueryText(eclText);
    query.clear();
    const char* cluster = req.getCluster();
    if (!cluster || !*cluster || !stricmp(cluster, "default"))
        cluster = defaultCluster.str();
    if (!cluster || !*cluster)
        throw MakeStringException(-1, "No Cluster Specified");
    if (!isValidCluster(cluster))
        throw MakeStringException(-1, "Invalid TargetCluster %s Specified", cluster);
    workunit->setClusterName(cluster);
    const char* snapshot = req.getSnapshot();
    if (snapshot && *snapshot)
        workunit->setSnapshot(snapshot);
    if (req.getResultLimit())
        workunit->setResultLimit(req.getResultLimit());
    // Execute it
    SCMStringBuffer wuid;
    workunit->getWuid(wuid);
    workunit->setAction(WUActionRun);
    workunit->setState(WUStateSubmitted);
    workunit.clear();
    resp.setWuid(wuid.str());
    submitWorkUnit(wuid.str(), context.querySecManager(), context.queryUser());
    if (!waitForWorkUnitToComplete(wuid.str(), (req.getWait_isNull()) ? defaultWait : req.getWait()))
    {
        StringBuffer result;
        result.appendf("<Exception><Source>ESP</Source><Message>Timed out waiting for job to complete: %s</Message></Exception>", wuid.str());
        resp.setResults(result.str());
        return true;
    }
    if (!deleteWorkunits && context.queryRequestParameters()->hasProp("redirect"))
    {
        StringBuffer url("/WsWorkunits/WUInfo?Wuid=");
        resp.setRedirectUrl(url.append(wuid.str()).str());
        return true;
    }
    Owned<IConstWorkUnit> cw = factory->openWorkUnit(wuid.str(), false);
    EclDirectWUExceptions errors(*cw);
    resp.setErrors(errors);
    if (req.getIncludeResults())
    {
        StringBuffer results;
        CRunEclExFormat outputFormat = req.getFormat();
        Owned<IWuWebView> web = createWuWebView(wuid.str(), NULL, NULL, getCFD(), true);
        if (!web)
            results.appendf("<Exception><Source>ESP</Source><Message>Failed loading result workunit %s</Message></Exception>", wuid.str());
        else if (outputFormat == CRunEclExFormat_Table)
        {
            StringBuffer xsltfile(getCFD());
            web->applyResultsXSLT(xsltfile.append("xslt/wsecl3_result.xslt").str(), results);
        }
        else
        {
            unsigned xmlflags = 0;
            if (outputFormat != CRunEclExFormat_ExtendedXml)
                xmlflags |= WWV_OMIT_SCHEMAS;
            if (context.queryRequestParameters()->hasProp("display_xslt"))
                xmlflags |= WWV_USE_DISPLAY_XSLT;
            else
                xmlflags |= WWV_OMIT_XML_DECLARATION;
            web->expandResults(results, xmlflags);
        }
        resp.setResults(results.str());
    }
    if (req.getIncludeGraphs())
    {
        Owned<IConstWUGraphIterator> it = &cw->getGraphs(GraphTypeAny);
        StringBuffer xgmml("<Graphs>");
        SCMStringBuffer s;
        ForEach(*it)
            xgmml.append(it->query().getXGMML(s, true).str());
        xgmml.append("</Graphs>");
        resp.setGraphsXGMML(xgmml.str());
    }
    if (deleteWorkunits)
        deleteEclDirectWorkunit(factory, wuid.str());
    return true;
}
bool CEclDirectEx::onRunEcl(IEspContext &context, IEspRunEclRequest &req, IEspRunEclResponse &resp)
{
    if (!context.validateFeatureAccess(ECLDIRECT_ACCESS, SecAccess_Full, false))
        throw MakeStringException(-1, "EclDirect access permission denied.");
    StringBuffer user;
    if (!context.getUserID(user).length())
        user.append(req.getUserName());
    Owned<IWorkUnitFactory> factory = getWorkUnitFactory(context.querySecManager(), context.queryUser());
    Owned<IWorkUnit> workunit;
    if (!user.length())
        workunit.setown(factory->createWorkUnit(NULL, "ECL-Direct", ""));
    else
    {
        workunit.setown(factory->createWorkUnit(NULL, "ECL-Direct", user.str()));
        workunit->setUser(user.str());
    }
    Owned<IWUQuery> query = workunit->updateQuery();
    query->setQueryText(req.getEclText());
    query.clear();
    const char* clustername = req.getCluster();
    if (!clustername || !*clustername || strieq(clustername, "default"))
        clustername = defaultCluster.str();
    if (!clustername || !*clustername)
        throw MakeStringException(-1, "No Cluster Specified");
    if (!isValidCluster(clustername))
        throw MakeStringException(-1, "Invalid TargetCluster %s Specified", clustername);
    workunit->setClusterName(clustername);
    if (req.getLimitResults())
        workunit->setResultLimit(100);
    const char* snapshot = req.getSnapshot();
    if (snapshot && *snapshot)
        workunit->setSnapshot(snapshot);
    // Execute it
    SCMStringBuffer wuid;
    workunit->getWuid(wuid);
    workunit->setAction(WUActionRun);
    workunit->setState(WUStateSubmitted);
    workunit.clear();
    submitWorkUnit(wuid.str(), context.querySecManager(), context.queryUser());
    if (waitForWorkUnitToComplete(wuid.str(), defaultWait))
    {
        Owned<IConstWorkUnit> cw = factory->openWorkUnit(wuid.str(), false);
        SCMStringBuffer resultXML;
        getFullWorkUnitResultsXML(context.queryUserId(), context.queryPassword(), cw.get(), resultXML);
        resp.setResults(resultXML.str());
        cw.clear();
        if (deleteWorkunits)
            deleteEclDirectWorkunit(factory, wuid.str());
    }
    else
    {
        // Don't delete these ones...
        DBGLOG("WorkUnit %s timed out", wuid.str());
        StringBuffer result;
        result.appendf("<Exception><Source>ESP</Source><Message>Timed out waiting for job to complete: %s</Message></Exception>", wuid.str());
        resp.setResults(result.str());
    }
    return true;
}
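// Workunit lifecycle shared by onRunEclEx() and onRunEcl() above (summary of
// the code, in order): create via the factory, attach the query text and
// target cluster, read the wuid, mark WUStateSubmitted and clear the write
// handle to release the lock, submitWorkUnit(), then block in
// waitForWorkUnitToComplete() and reopen read-only via openWorkUnit() to
// collect results; timed-out workunits are reported but deliberately not
// deleted.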
void kill()
{
    ActPrintLog("MSortSlaveActivity::kill");
    sorter.clear();
    CSlaveActivity::kill();
}
virtual void kill()
{
    sorter.clear();
    CSlaveActivity::kill();
}
int main(int argc,char **argv)
{
    InitModuleObjects();
    EnableSEHtoExceptionMapping();
#ifndef __64BIT__
    // Restrict stack sizes on 32-bit systems
    Thread::setDefaultStackSize(0x10000);   // 64K stack (also set in windows DSP)
#endif
    Owned<IFile> sentinelFile = createSentinelTarget();
    removeSentinelFile(sentinelFile);
    SocketEndpoint listenep;
    unsigned sendbufsize = 0;
    unsigned recvbufsize = 0;
    int i = 1;
    bool isdaemon = (memicmp(argv[0]+strlen(argv[0])-4,".exe",4)==0); // bit of a kludge for windows - if .exe not specified then not daemon
    bool locallisten = false;
    const char *logdir=NULL;
    bool requireauthenticate = false;
    StringBuffer logDir;
    StringBuffer instanceName;

    // Get SSL settings
    const char *sslCertFile;
    bool useSSL;
    unsigned short dafsPort; // DAFILESRV_PORT or SECURE_DAFILESRV_PORT
    querySecuritySettings(&useSSL, &dafsPort, &sslCertFile, NULL);

    unsigned parallelRequestLimit = DEFAULT_PARALLELREQUESTLIMIT;
    unsigned throttleDelayMs = DEFAULT_THROTTLEDELAYMS;
    unsigned throttleCPULimit = DEFAULT_THROTTLECPULIMIT;
    Owned<IPropertyTree> env = getHPCCEnvironment();
    if (env)
    {
        StringBuffer dafilesrvPath("Software/DafilesrvProcess");
        if (instanceName.length())
            dafilesrvPath.appendf("[@name=\"%s\"]", instanceName.str());
        IPropertyTree *daFileSrv = env->queryPropTree(dafilesrvPath);
        if (daFileSrv)
        {
            // global DaFileSrv settings:
            parallelRequestLimit = daFileSrv->getPropInt("@parallelRequestLimit", DEFAULT_PARALLELREQUESTLIMIT);
            throttleDelayMs = daFileSrv->getPropInt("@throttleDelayMs", DEFAULT_THROTTLEDELAYMS);
            throttleCPULimit = daFileSrv->getPropInt("@throttleCPULimit", DEFAULT_THROTTLECPULIMIT);
            // any overrides by Instance definitions?
            // NB: This won't work if netAddress is "." or if we start supporting hostnames there
            StringBuffer ipStr;
            queryHostIP().getIpText(ipStr);
            VStringBuffer daFileSrvPath("Instance[@netAddress=\"%s\"]", ipStr.str());
            IPropertyTree *dafileSrvInstance = daFileSrv->queryPropTree(daFileSrvPath);
            if (dafileSrvInstance)
            {
                parallelRequestLimit = dafileSrvInstance->getPropInt("@parallelRequestLimit", parallelRequestLimit);
                throttleDelayMs = dafileSrvInstance->getPropInt("@throttleDelayMs", throttleDelayMs);
                throttleCPULimit = dafileSrvInstance->getPropInt("@throttleCPULimit", throttleCPULimit);
            }
        }
    }
    while (argc>i)
    {
        if (stricmp(argv[i],"-D")==0)
        {
            i++;
            isdaemon = true;
        }
        else if (stricmp(argv[i],"-R")==0)
        {
            // for remote run
            i++;
#ifdef _WIN32
            isdaemon = false;
#else
            isdaemon = true;
#endif
        }
        else if (stricmp(argv[i],"-A")==0)
        {
            i++;
            requireauthenticate = true;
        }
        else if ((argv[i][0]=='-')&&(toupper(argv[i][1])=='T')&&(!argv[i][2]||isdigit(argv[i][2])))
        {
            if (argv[i][2])
                setDafsTrace(NULL,(byte)atoi(argv[i]+2));
            i++;
            isdaemon = false;
        }
        else if ((argc>i+1)&&(stricmp(argv[i],"-L")==0))
        {
            i++;
            logDir.clear().append(argv[i++]);
        }
        else if ((argc>i+1)&&(stricmp(argv[i],"-I")==0))
        {
            i++;
            instanceName.clear().append(argv[i++]);
        }
        else if (stricmp(argv[i],"-LOCAL")==0)
        {
            i++;
            locallisten = true;
        }
        else if (stricmp(argv[i],"-NOSSL")==0)
        {
            // overrides config setting
            i++;
            if (useSSL)
            {
                PROGLOG("DaFileSrv SSL specified in config but overridden by -NOSSL in command line");
                useSSL = false;
                dafsPort = DAFILESRV_PORT;
            }
        }
        else
            break;
    }
    if (useSSL && !sslCertFile)
    {
        ERRLOG("DaFileSrv SSL specified but certificate file information missing from environment.conf");
        exit(-1);
    }
    if (0 == logDir.length())
    {
        getConfigurationDirectory(NULL,"log","dafilesrv",instanceName.str(),logDir);
        if (0 == logDir.length())
            logDir.append(".");
    }
    if (instanceName.length())
    {
        addPathSepChar(logDir);
        logDir.append(instanceName.str());
    }
#ifdef _WIN32
    if ((argc>i)&&(stricmp(argv[i],"-install")==0))
    {
        if (installService(DAFS_SERVICE_NAME,DAFS_SERVICE_DISPLAY_NAME,NULL))
        {
            PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Installed");
            return 0;
        }
        return 1;
    }
    if ((argc>i)&&(stricmp(argv[i],"-remove")==0))
    {
        if (uninstallService(DAFS_SERVICE_NAME,DAFS_SERVICE_DISPLAY_NAME))
        {
            PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Uninstalled");
            return 0;
        }
        return 1;
    }
#endif
    if (argc == i)
        listenep.port = dafsPort;
    else
    {
        if (strchr(argv[i],'.')||!isdigit(argv[i][0]))
            listenep.set(argv[i], dafsPort);
        else
            listenep.port = atoi(argv[i]);
        if (listenep.port==0)
        {
            usage();
            exit(-1);
        }
        sendbufsize = (argc>i+1)?(atoi(argv[i+1])*1024):0;
        recvbufsize = (argc>i+2)?(atoi(argv[i+2])*1024):0;
    }
    if (isdaemon)
    {
#ifdef _WIN32
        class cserv: public CService
        {
            bool stopped;
            bool started;
            SocketEndpoint listenep;
            bool useSSL;
            bool requireauthenticate;

            class cpollthread: public Thread
            {
                cserv *parent;
            public:
                cpollthread(cserv *_parent)
                    : Thread("CService::cpollthread"), parent(_parent)
                {
                }
                int run()
                {
                    while (parent->poll())
                        Sleep(1000);
                    return 1;
                }
            } pollthread;

            Owned<IRemoteFileServer> server;

        public:
            cserv(SocketEndpoint _listenep, bool _useSSL)
                : listenep(_listenep),useSSL(_useSSL),pollthread(this)
            {
                stopped = false;
                started = false;
            }
            virtual ~cserv()
            {
                stopped = true;
                if (started)
                    pollthread.join();
            }
            bool init()
            {
                PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Initialized");
                started = true;
                pollthread.start();
                return true;
            }
            bool poll()
            {
                if (stopped||!running())
                {
                    PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Stopping");
                    if (server)
                    {
                        server->stop();
                        server.clear();
                    }
                    return false;
                }
                return true;
            }
            void run()
            {
                // Get params from HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\DaFileSrv\Parameters
                int requireauthenticate=0;
                HKEY hkey;
                if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\DaFileSrv\\Parameters", 0, KEY_QUERY_VALUE, &hkey) == ERROR_SUCCESS)
                {
                    DWORD dwType = 0;
                    DWORD dwSize = sizeof(requireauthenticate);
                    RegQueryValueEx(hkey, "RequireAuthentication", NULL, &dwType, (BYTE*)&requireauthenticate, &dwSize);
                    RegCloseKey(hkey);
                }
                StringBuffer eps;
                if (listenep.isNull())
                    eps.append(listenep.port);
                else
                    listenep.getUrlStr(eps);
                enableDafsAuthentication(requireauthenticate!=0);
                PROGLOG("Opening " DAFS_SERVICE_DISPLAY_NAME " on %s%s", useSSL?"SECURE ":"",eps.str());
                const char * verstring = remoteServerVersionString();
                PROGLOG("Version: %s", verstring);
                PROGLOG("Authentication:%s required",requireauthenticate?"":" not");
                PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Running");
                server.setown(createRemoteFileServer(parallelRequestLimit, throttleDelayMs, throttleCPULimit));
                try
                {
                    server->run(listenep, useSSL);
                }
                catch (IException *e)
                {
                    EXCLOG(e,DAFS_SERVICE_NAME);
                    e->Release();
                }
                PROGLOG(DAFS_SERVICE_DISPLAY_NAME " Stopped");
                stopped = true;
            }
        } service(listenep, useSSL);
        service.start();
        return 0;
#else
        int ret = initDaemon();
        if (ret)
            return ret;
#endif
    }
    {
        Owned<IComponentLogFileCreator> lf = createComponentLogFileCreator(logDir.str(), "DAFILESRV");
        lf->setCreateAliasFile(false);
        lf->setMaxDetail(TopDetail);
        lf->beginLogging();
    }
    PROGLOG("Parallel request limit = %d, throttleDelayMs = %d, throttleCPULimit = %d", parallelRequestLimit, throttleDelayMs, throttleCPULimit);
    const char * verstring = remoteServerVersionString();
    StringBuffer eps;
    if (listenep.isNull())
        eps.append(listenep.port);
    else
        listenep.getUrlStr(eps);
    enableDafsAuthentication(requireauthenticate);
    PROGLOG("Opening Dali File Server on %s%s", useSSL?"SECURE ":"",eps.str());
    PROGLOG("Version: %s", verstring);
    PROGLOG("Authentication:%s required",requireauthenticate?"":" not");
    startPerformanceMonitor(10*60*1000, PerfMonStandard);
    server.setown(createRemoteFileServer(parallelRequestLimit, throttleDelayMs, throttleCPULimit));
    writeSentinelFile(sentinelFile);
    try
    {
        server->run(listenep, useSSL);
    }
    catch (IException *e)
    {
        EXCLOG(e,"DAFILESRV");
        e->Release();
    }
    if (server)
        server->stop();
    server.clear();
    PROGLOG("Stopped Dali File Server");
    return 0;
}
void kill()
{
    CMasterActivity::kill();
    originalIndexFile.clear();
    newIndexFile.clear();
}
virtual ~CEspBindingEntry()
{
    sock_.clear();
    binding_.clear();
}
virtual void init()
{
    CMasterActivity::init();
    OwnedRoxieString indexFileName(helper->getIndexFileName());
    Owned<IDistributedFile> dataFile;
    Owned<IDistributedFile> indexFile = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, 0 != (helper->getJoinFlags() & JFindexoptional), true);
    unsigned keyReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJKRR", 0);
    if (!keyReadWidth || keyReadWidth>container.queryJob().querySlaves())
        keyReadWidth = container.queryJob().querySlaves();
    initMb.clear();
    initMb.append(indexFileName.get());
    if (helper->diskAccessRequired())
        numTags += 2;
    initMb.append(numTags);
    unsigned t=0;
    for (; t<numTags; t++)
    {
        tags[t] = container.queryJob().allocateMPTag();
        initMb.append(tags[t]);
    }
    bool keyHasTlk = false;
    if (indexFile)
    {
        unsigned numParts = 0;
        localKey = indexFile->queryAttributes().getPropBool("@local");
        if (container.queryLocalData() && !localKey)
            throw MakeActivityException(this, 0, "Keyed Join cannot be LOCAL unless supplied index is local");
        checkFormatCrc(this, indexFile, helper->getIndexFormatCrc(), true);
        Owned<IFileDescriptor> indexFileDesc = indexFile->getFileDescriptor();
        IDistributedSuperFile *superIndex = indexFile->querySuperFile();
        unsigned superIndexWidth = 0;
        unsigned numSuperIndexSubs = 0;
        if (superIndex)
        {
            numSuperIndexSubs = superIndex->numSubFiles(true);
            bool first=true;
            // consistency check
            Owned<IDistributedFileIterator> iter = superIndex->getSubFileIterator(true);
            ForEach(*iter)
            {
                IDistributedFile &f = iter->query();
                unsigned np = f.numParts()-1;
                IDistributedFilePart &part = f.queryPart(np);
                const char *kind = part.queryAttributes().queryProp("@kind");
                bool hasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); // if last part not tlk, then deemed local (might be singlePartKey)
                if (first)
                {
                    first = false;
                    keyHasTlk = hasTlk;
                    superIndexWidth = f.numParts();
                    if (keyHasTlk)
                        --superIndexWidth;
                }
                else
                {
                    if (hasTlk != keyHasTlk)
                        throw MakeActivityException(this, 0, "Local/Single part keys cannot be mixed with distributed(tlk) keys in keyedjoin");
                    if (keyHasTlk && superIndexWidth != f.numParts()-1)
                        throw MakeActivityException(this, 0, "Super sub keys of different width cannot be mixed with distributed(tlk) keys in keyedjoin");
                }
            }
            if (keyHasTlk)
                numParts = superIndexWidth * numSuperIndexSubs;
            else
                numParts = superIndex->numParts();
        }
        else
        {
            numParts = indexFile->numParts();
            if (numParts)
            {
                const char *kind = indexFile->queryPart(indexFile->numParts()-1).queryAttributes().queryProp("@kind");
                keyHasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind);
                if (keyHasTlk)
                    --numParts;
            }
        }
        if (numParts)
        {
            initMb.append(numParts);
            initMb.append(superIndexWidth); // 0 if not superIndex
            initMb.append((superIndex && superIndex->isInterleaved()) ? numSuperIndexSubs : 0);
            unsigned p=0;
            UnsignedArray parts;
            for (; p<numParts; p++)
                parts.append(p);
            indexFileDesc->serializeParts(initMb, parts);
            if (localKey)
                keyHasTlk = false; // not used
            initMb.append(keyHasTlk);
            if (keyHasTlk)
            {
                if (numSuperIndexSubs)
                    initMb.append(numSuperIndexSubs);
                else
                    initMb.append((unsigned)1);
                Owned<IDistributedFileIterator> iter;
                IDistributedFile *f;
                if (superIndex)
                {
                    iter.setown(superIndex->getSubFileIterator(true));
                    f = &iter->query();
                }
                else
                    f = indexFile;
                loop
                {
                    unsigned location;
                    OwnedIFile iFile;
                    StringBuffer filePath;
                    Owned<IFileDescriptor> fileDesc = f->getFileDescriptor();
                    Owned<IPartDescriptor> tlkDesc = fileDesc->getPart(fileDesc->numParts()-1);
                    if (!getBestFilePart(this, *tlkDesc, iFile, location, filePath))
                        throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", f->queryLogicalName());
                    OwnedIFileIO iFileIO = iFile->open(IFOread);
                    assertex(iFileIO);
                    size32_t tlkSz = (size32_t)iFileIO->size();
                    initMb.append(tlkSz);
                    ::read(iFileIO, 0, tlkSz, initMb);
                    if (!iter || !iter->next())
                        break;
                    f = &iter->query();
                }
            }
            if (helper->diskAccessRequired())
            {
                OwnedRoxieString fetchFilename(helper->getFileName());
                if (fetchFilename)
                {
                    dataFile.setown(queryThorFileManager().lookup(container.queryJob(), fetchFilename, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true));
                    if (dataFile)
                    {
                        if (superIndex)
                            throw MakeActivityException(this, 0, "Superkeys and full keyed joins are not supported");
                        Owned<IFileDescriptor> dataFileDesc = getConfiguredFileDescriptor(*dataFile);
                        void *ekey;
                        size32_t ekeylen;
                        helper->getFileEncryptKey(ekeylen,ekey);
                        bool encrypted = dataFileDesc->queryProperties().getPropBool("@encrypted");
                        if (0 != ekeylen)
                        {
                            memset(ekey,0,ekeylen);
                            free(ekey);
                            if (!encrypted)
                            {
                                Owned<IException> e = MakeActivityWarning(&container, TE_EncryptionMismatch, "Ignoring encryption key provided as file '%s' was not published as encrypted", helper->getFileName());
                                container.queryJob().fireException(e);
                            }
                        }
                        else if (encrypted)
                            throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", fetchFilename.get());
                        unsigned dataReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJDRR", 0);
                        if (!dataReadWidth || dataReadWidth>container.queryJob().querySlaves())
                            dataReadWidth = container.queryJob().querySlaves();
                        Owned<IGroup> grp = container.queryJob().querySlaveGroup().subset((unsigned)0, dataReadWidth);
                        dataFileMapping.setown(getFileSlaveMaps(dataFile->queryLogicalName(), *dataFileDesc, container.queryJob().queryUserDescriptor(), *grp, false, false, NULL));
                        dataFileMapping->serializeFileOffsetMap(offsetMapMb.clear());
                    }
                    else
                        indexFile.clear();
                }
            }
        }
        else
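// Layout of initMb as serialized by init() above (summary, in append order):
// indexFileName, numTags, the allocated MP tags, then - when the index
// resolves with parts - numParts, superIndexWidth (0 unless a superkey), the
// interleave count, the serialized part descriptors, the keyHasTlk flag and,
// if set, a TLK count followed by (size, bytes) for each top level key part.
// Slaves are expected to deserialize in this same order.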
void CWriteMasterBase::publish()
{
    if (published)
        return;
    published = true;
    if (!(diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp)))
        updateActivityResult(container.queryJob().queryWorkUnit(), diskHelperBase->getFlags(), diskHelperBase->getSequence(), fileName, recordsProcessed);
    IPropertyTree &props = fileDesc->queryProperties();
    props.setPropInt64("@recordCount", recordsProcessed);
    if (0 == (diskHelperBase->getFlags() & TDXtemporary) || container.queryJob().queryUseCheckpoints())
    {
        if (0 != (diskHelperBase->getFlags() & TDWexpires))
            setExpiryTime(props, diskHelperBase->getExpiryDays());
        if (TDWupdate & diskHelperBase->getFlags())
        {
            unsigned eclCRC;
            unsigned __int64 totalCRC;
            diskHelperBase->getUpdateCRCs(eclCRC, totalCRC);
            props.setPropInt("@eclCRC", eclCRC);
            props.setPropInt64("@totalCRC", totalCRC);
        }
    }
    container.queryTempHandler()->registerFile(fileName, container.queryOwner().queryGraphId(), diskHelperBase->getTempUsageCount(), TDXtemporary & diskHelperBase->getFlags(), getDiskOutputKind(diskHelperBase->getFlags()), &clusters);
    if (!dlfn.isExternal())
    {
        bool temporary = 0 != (diskHelperBase->getFlags()&TDXtemporary);
        if (!temporary && (queryJob().querySlaves() < fileDesc->numParts()))
        {
            // create empty parts for a fileDesc being published that is larger than this cluster
            size32_t recordSize = 0;
            IOutputMetaData *diskRowMeta = diskHelperBase->queryDiskRecordSize()->querySerializedDiskMeta();
            if (diskRowMeta->isFixedSize() && (TAKdiskwrite == container.getKind()))
            {
                recordSize = diskRowMeta->getMinRecordSize();
                if (0 != (diskHelperBase->getFlags() & TDXgrouped))
                    recordSize += 1;
            }
            unsigned compMethod = COMPRESS_METHOD_LZW;
            // rowdiff used if recordSize > 0, else fallback to compMethod
            if (getOptBool(THOROPT_COMP_FORCELZW, false))
            {
                recordSize = 0; // by default if fixed length (recordSize set), row diff compression is used. This forces compMethod.
                compMethod = COMPRESS_METHOD_LZW;
            }
            else if (getOptBool(THOROPT_COMP_FORCEFLZ, false))
                compMethod = COMPRESS_METHOD_FASTLZ;
            else if (getOptBool(THOROPT_COMP_FORCELZ4, false))
                compMethod = COMPRESS_METHOD_LZ4;
            bool blockCompressed;
            bool compressed = fileDesc->isCompressed(&blockCompressed);
            for (unsigned clusterIdx=0; clusterIdx<fileDesc->numClusters(); clusterIdx++)
            {
                StringBuffer clusterName;
                fileDesc->getClusterGroupName(clusterIdx, clusterName, &queryNamedGroupStore());
                PROGLOG("Creating blank parts for file '%s', cluster '%s'", fileName.get(), clusterName.str());
                unsigned p=0;
                while (p<fileDesc->numParts())
                {
                    if (p == targetOffset)
                        p += queryJob().querySlaves();
                    IPartDescriptor *partDesc = fileDesc->queryPart(p);
                    CDateTime createTime, modifiedTime;
                    for (unsigned c=0; c<partDesc->numCopies(); c++)
                    {
                        RemoteFilename rfn;
                        partDesc->getFilename(c, rfn);
                        StringBuffer path;
                        rfn.getPath(path);
                        try
                        {
                            ensureDirectoryForFile(path.str());
                            OwnedIFile iFile = createIFile(path.str());
                            Owned<IFileIO> iFileIO;
                            if (compressed)
                                iFileIO.setown(createCompressedFileWriter(iFile, recordSize, false, true, NULL, compMethod));
                            else
                                iFileIO.setown(iFile->open(IFOcreate));
                            dbgassertex(iFileIO.get());
                            iFileIO.clear();
                            // ensure copies have matching datestamps, as they would do normally (backupnode expects it)
                            if (partDesc->numCopies() > 1)
                            {
                                if (0 == c)
                                    iFile->getTime(&createTime, &modifiedTime, NULL);
                                else
                                    iFile->setTime(&createTime, &modifiedTime, NULL);
                            }
                        }
                        catch (IException *e)
                        {
                            if (0 == c)
                                throw;
                            Owned<IThorException> e2 = MakeThorException(e);
                            e->Release();
                            e2->setAction(tea_warning);
                            queryJob().fireException(e2);
                        }
                    }
                    partDesc->queryProperties().setPropInt64("@size", 0);
                    p++;
                }
            }
        }
        queryThorFileManager().publish(container.queryJob(), fileName, *fileDesc, NULL);
    }
}
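// Compression selection in publish() above (summary): a fixed-size record on
// a TAKdiskwrite defaults to row-diff compression (recordSize > 0), so
// compMethod only takes effect when recordSize is 0; THOROPT_COMP_FORCELZW
// forces LZW and zeroes recordSize to disable row-diff, while
// THOROPT_COMP_FORCEFLZ and THOROPT_COMP_FORCELZ4 select FastLZ and LZ4
// respectively.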
void ParseErrorHandler::resetErrors()
{
    m_errors.clear();
}
//-----------------------------------------------------
//
//-----------------------------------------------------
int main(int argc, char* argv[])
{
#ifdef _NO_LDAP
    fprintf(stderr, "System was built with _NO_LDAP\n");
    return -1;
#endif
    for (int x = 1; x < argc; x++)
    {
        if (0==strncmp("-h", argv[x], 2))
        {
            usage();
            exit(0);
        }
        else
        {
            fprintf(stderr, "\nERROR: Unrecognized parameter : '%s', enter 'initldap -h' for help\n", argv[x]);
            exit(1);
        }
    }
    InitModuleObjects();

    // execute configgen to query the LDAP Server configuration(s)
    StringBuffer cmd;
    cmd.appendf("%s%cconfiggen -env %s%c%s -listldapservers", ADMIN_DIR, PATHSEPCHAR, CONFIG_DIR, PATHSEPCHAR, ENV_XML_FILE);
    char *configBuffer = NULL;

    // acquire LDAP configuration by executing configgen and capturing output
    {
        StringBuffer configBuff;
        Owned<IPipeProcess> pipe = createPipeProcess();
        if (pipe->run("configgen", cmd.str(), ".", false, true, true, 0))
        {
            Owned<ISimpleReadStream> pipeReader = pipe->getOutputStream();
            const size32_t chunkSize = 8192;
            for (;;)
            {
                size32_t sizeRead = pipeReader->read(chunkSize, configBuff.reserve(chunkSize));
                if (sizeRead < chunkSize)
                {
                    configBuff.setLength(configBuff.length() - (chunkSize - sizeRead));
                    break;
                }
            }
            pipe->closeOutput();
        }
        int retcode = pipe->wait();
        if (retcode)
        {
            fprintf(stderr, "\nERROR %d: unable to execute %s", retcode, cmd.str());
            exit(1);
        }
        configBuffer = strdup(configBuff.str());
    }

    // Using the LDAP Server parms queried from configgen, build an
    // LDAPSecurity property tree for each LDAP Server and call the LDAP
    // Security Manager to create the needed entries
    Owned<IPropertyTree> ldapProps;
    char *saveptr;
    char *pLine = strtok_r(configBuffer, "\n", &saveptr);
    while (pLine)
    {
        if (0==strcmp(pLine, "LDAPServerProcess"))
        {
            if (ldapProps)
                initLDAP(ldapProps);
            ldapProps.setown(createPTree("ldapSecurity")); // setown releases any previous tree
        }
        else
        {
            char *sep = strchr(pLine, ',');
            if (sep)
            {
                *sep = '\0'; // terminate the property name in place; value follows the comma
                ldapProps->addProp(pLine, sep+1);
            }
        }
        pLine = strtok_r(NULL, "\n", &saveptr);
    }
    if (ldapProps)
        initLDAP(ldapProps);
    if (configBuffer)
        free(configBuffer);
    ldapProps.clear();
    releaseAtoms();
    return 0;
}
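// The strtok_r parser above expects configgen output of the following shape
// (assumed illustration; the property names shown are hypothetical): a line
// reading "LDAPServerProcess" starts a new server section, and every other
// line is a comma-separated name,value pair added to the current ldapSecurity
// tree, e.g.
//   LDAPServerProcess
//   @ldapAddress,192.168.1.10
//   @ldapPort,389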