virtual void init() { OwnedRoxieString fname(helper->getFileName()); dlfn.set(fname); isLocal = 0 != (TIWlocal & helper->getFlags()); unsigned minSize = helper->queryDiskRecordSize()->getMinRecordSize(); if (minSize > KEYBUILD_MAXLENGTH) throw MakeActivityException(this, 0, "Index minimum record length (%d) exceeds %d internal limit", minSize, KEYBUILD_MAXLENGTH); unsigned maxSize = helper->queryDiskRecordSize()->getRecordSize(NULL); if (maxSize > KEYBUILD_MAXLENGTH) throw MakeActivityException(this, 0, "Index maximum record length (%d) exceeds %d internal limit. Minimum size = %d, try setting index MAXLENGTH", maxSize, KEYBUILD_MAXLENGTH, minSize); singlePartKey = 0 != (helper->getFlags() & TIWsmall) || dlfn.isExternal(); clusters.kill(); unsigned idx=0; while (true) { OwnedRoxieString cluster(helper->getCluster(idx)); if(!cluster) break; clusters.append(cluster); idx++; } IArrayOf<IGroup> groups; if (singlePartKey) { isLocal = true; buildTlk = false; } else if (!isLocal || globals->getPropBool("@buildLocalTlks", true)) buildTlk = true; fillClusterArray(container.queryJob(), fname, clusters, groups); unsigned restrictedWidth = 0; if (TIWhaswidth & helper->getFlags()) { restrictedWidth = helper->getWidth(); if (restrictedWidth > container.queryJob().querySlaves()) throw MakeActivityException(this, 0, "Unsupported, can't refactor to width(%d) larger than host cluster(%d)", restrictedWidth, container.queryJob().querySlaves()); else if (restrictedWidth < container.queryJob().querySlaves()) { if (!isLocal) throw MakeActivityException(this, 0, "Unsupported, refactoring to few parts only supported for local indexes."); assertex(!singlePartKey); unsigned gwidth = groups.item(0).ordinality(); if (0 != container.queryJob().querySlaves() % gwidth) throw MakeActivityException(this, 0, "Unsupported, refactored target size (%d) must be factor of thor cluster width (%d)", groups.item(0).ordinality(), container.queryJob().querySlaves()); if (0 == restrictedWidth) restrictedWidth 
= gwidth; ForEachItemIn(g, groups) { IGroup &group = groups.item(g); if (gwidth != groups.item(g).ordinality()) throw MakeActivityException(this, 0, "Unsupported, cannot output multiple refactored widths, targeting cluster '%s' and '%s'", clusters.item(0), clusters.item(g)); if (gwidth != restrictedWidth) groups.replace(*group.subset((unsigned)0, restrictedWidth), g); } refactor = true; }
//Outputs void addOutputDatasetColumn(const char * datasetName, CColumn * _output) { if (0 == m_outputDatasets.ordinality()) m_outputDatasets.append(*(new CTable(datasetName,NULL,NULL)));//add new output dataset else { CTable & dataset = m_outputDatasets.item(m_outputDatasets.ordinality() - 1); if (0 != strcmp(datasetName, dataset.queryName())) m_outputDatasets.append(*(new CTable(datasetName,NULL,NULL)));//add new output dataset } CTable & dataset = m_outputDatasets.item(m_outputDatasets.ordinality() - 1); dataset.addColumn(_output); }
// Recursively removes a directory that is known to contain no regular files.
// Returns true if the directory (and all empty sub-directories) were removed.
// Returns false without removing anything if any regular file is found, if a
// sub-directory could not be deleted, or if an exception occurs during recursion.
static bool deleteEmptyDir(IFile *dir)
{
    // this is a bit odd - basically we already know no files but there may be empty sub-dirs
    Owned<IDirectoryIterator> iter = dir->directoryFiles(NULL,false,true);
    IArrayOf<IFile> subdirs;
    bool candelete = true;
    ForEach(*iter)
    {
        if (iter->isDir())
            subdirs.append(iter->get());
        else
            candelete = false; // a real file is present - cannot delete
    }
    if (!candelete)
        return false;
    try
    {
        // Recurse into each sub-directory; all must be removable for the parent to go.
        ForEachItemIn(i,subdirs)
        {
            if (!deleteEmptyDir(&subdirs.item(i)))
                candelete = false;
        }
    }
    catch (IException *e)
    {
        EXCLOG(e,"deleteEmptyDir");
        e->Release(); // fix: jlib exceptions are ref-counted - release after logging to avoid a leak
        candelete = false;
    }
    if (!candelete)
        return false;
    static CriticalSection sect;
    CriticalBlock block(sect); // don't want to actually remove in parallel
    dir->remove();
    // Verify removal actually happened (remove() may fail silently, e.g. on a race).
    return !dir->exists();
}
// Master-side initialisation for a disk-write activity: resolves or creates the
// target file descriptor, copies helper flags into file properties, and for
// child-query temporaries publishes the file early (the activity runs locally
// and will not return to the master until the owning graph completes).
void CWriteMasterBase::init()
{
    published = false;
    recordsProcessed = 0;
    dlfn.set(diskHelperBase->getFileName());
    if (diskHelperBase->getFlags() & TDWextend)
    {
        // Extending an existing file: temporaries can never be extended.
        assertex(0 == (diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp)));
        Owned<IDistributedFile> file = queryThorFileManager().lookup(container.queryJob(), diskHelperBase->getFileName(), false, true);
        if (file.get())
        {
            // Reuse the existing file's descriptor and record the read dependency.
            fileDesc.setown(file->getFileDescriptor());
            queryThorFileManager().noteFileRead(container.queryJob(), file, true);
        }
    }
    if (dlfn.isExternal())
        mpTag = container.queryJob().allocateMPTag(); // used
    // No descriptor yet (not extending, or lookup failed) - create a new file.
    if (NULL == fileDesc.get())
    {
        bool overwriteok = 0!=(TDWoverwrite & diskHelperBase->getFlags());
        unsigned idx=0;
        while (diskHelperBase->queryCluster(idx))
            clusters.append(diskHelperBase->queryCluster(idx++));
        IArrayOf<IGroup> groups;
        fillClusterArray(container.queryJob(), diskHelperBase->getFileName(), clusters, groups);
        fileDesc.setown(queryThorFileManager().create(container.queryJob(), diskHelperBase->getFileName(), clusters, groups, overwriteok, diskHelperBase->getFlags()));
        if (1 == groups.ordinality())
            targetOffset = getGroupOffset(groups.item(0), container.queryJob().querySlaveGroup());
        // Mirror helper flags into the file's property tree.
        IPropertyTree &props = fileDesc->queryProperties();
        if (diskHelperBase->getFlags() & (TDWowned|TDXjobtemp|TDXtemporary))
            props.setPropBool("@owned", true);
        if (diskHelperBase->getFlags() & TDWresult)
            props.setPropBool("@result", true);
        const char *rececl= diskHelperBase->queryRecordECL();
        if (rececl&&*rececl)
            props.setProp("ECL", rececl);
        bool blockCompressed=false;
        void *ekey;
        size32_t ekeylen;
        diskHelperBase->getEncryptKey(ekeylen,ekey);
        if (ekeylen)
        {
            // Key material is only probed for presence; scrub and free it immediately.
            memset(ekey,0,ekeylen);
            free(ekey);
            props.setPropBool("@encrypted", true);
            blockCompressed = true; // encrypted files are always block compressed
        }
        else if (0 != (diskHelperBase->getFlags() & TDWnewcompress) || 0 != (diskHelperBase->getFlags() & TDXcompress))
            blockCompressed = true;
        if (blockCompressed)
            props.setPropBool("@blockCompressed", true);
        if (TAKdiskwrite == container.getKind() && (0 != (diskHelperBase->getFlags() & TDXtemporary)) && container.queryOwner().queryOwner() && (!container.queryOwner().isGlobal())) // I am in a child query
        {
            // do early, because this will be local act. and will not come back to master until end of owning graph.
            publish();
        }
    }
}
// ESP handler: returns an XML list of XRef nodes - one per Thor cluster plus a
// trailing pseudo-node "SuperFiles" - each annotated with its last-modified
// time and run status (or "Not Run" if the node does not yet exist).
bool CWsDfuXRefEx::onDFUXRefList(IEspContext &context, IEspDFUXRefListRequest &req, IEspDFUXRefListResponse &resp)
{
    try
    {
        if (!context.validateFeatureAccess(FEATURE_URL, SecAccess_Read, false))
            throw MakeStringException(ECLWATCH_DFU_XREF_ACCESS_DENIED, "Failed to access Xref. Permission denied.");
        StringBuffer username;
        context.getUserID(username);
        DBGLOG("CWsDfuXRefEx::onDFUXRefList User=%s",username.str());
        //Firstly we need to get a list of the available Thor Cluster....
        IArrayOf<IEspTpCluster> clusters;
        CTpWrapper _topology;
        _topology.getClusterProcessList(eqThorCluster,clusters,false,true);
        ///_topology.getClusterList(eqRoxieCluster,clusters,false,true);
        Owned<IPropertyTree> pXRefNodeTree = createPTree("XRefNodes");
        //DBGLOG("CWsDfuXRefEx::onDFUXRefList1\n");
        // NB: '<=' is deliberate - the extra final iteration (cluster == NULL)
        // emits the pseudo "SuperFiles" node after the real clusters.
        for (unsigned x=0;x<=clusters.ordinality();x++)
        {
            IPropertyTree* XRefTreeNode = pXRefNodeTree->addPropTree("XRefNode", createPTree(ipt_caseInsensitive));
            IEspTpCluster* cluster = x<clusters.ordinality()?&clusters.item(x):NULL;
            const char *clustername = cluster?cluster->getName():"SuperFiles";
            XRefTreeNode->setProp("Name",clustername);
            //look up the XRef node; it may not have been created/run yet
            Owned<IXRefNode> xRefNode = XRefNodeManager->getXRefNode(clustername);
            if (xRefNode == 0)
            {
                XRefTreeNode->setProp("Modified","");
                XRefTreeNode->setProp("Status","Not Run");
            }
            else
            {
                StringBuffer buf;
                XRefTreeNode->setProp("Modified",xRefNode->getLastModified(buf).str());
                buf.clear();
                XRefTreeNode->setProp("Status",xRefNode->getStatus(buf).str());
            }
        }
        // Serialise the whole node tree into the response as XML text.
        StringBuffer buf;
        resp.setDFUXRefListResult(toXML(pXRefNodeTree, buf).str());
    }
    catch(IException* e)
    {
        FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR);
    }
    return true;
}
static void stopServer() { CriticalBlock b(*stopServerCrit); // NB: will not protect against abort handler, which will interrupt thread and be on same TID. if (serverStopped) return; serverStopped = true; ForEachItemInRev(h,servers) { IDaliServer &server=servers.item(h); LOG(MCprogress, unknownJob, "Suspending %d",h); server.suspend(); }
void getGroupByString(char delimiter, StringBuffer & str) { int groupbycount = groupbyList.length(); for (int i = 0; i < groupbycount; i++) { ISQLExpression * ordercol = &groupbyList.item(i); SQLFieldValueExpression * colexp = dynamic_cast<SQLFieldValueExpression *>(ordercol); str.append(colexp->getName()); if (i != groupbycount - 1) str.append(delimiter); } }
void getOrderByString(char delimiter, StringBuffer & str) { int orderbycount = orderbyList.length(); for (int i = 0; i < orderbycount; i++) { ISQLExpression* ordercol = &orderbyList.item(i); SQLFieldValueExpression* colexp = dynamic_cast<SQLFieldValueExpression*>(ordercol); str.append(colexp->isAscending() ? "" : "-"); str.append(colexp->getNameOrAlias()); if (i != orderbycount - 1) str.append(delimiter); } }
// Caches the access permissions of each resource in 'resources' for this user,
// stamping every entry with the current time. An existing entry for the same
// (name, type) key is replaced, and its old timestamp entry is removed from the
// timestamp multimap first so the two maps stay consistent.
void CResPermissionsCache::add( IArrayOf<ISecResource>& resources )
{
    time_t tstamp;
    time(&tstamp);
    int nresources = resources.ordinality();
    for (int i = 0; i < nresources; i++)
    {
        ISecResource* secResource = &resources.item(i);
        if(!secResource)
            continue;
        const char* resource = secResource->getName();
        SecResourceType resourcetype = secResource->getResourceType();
        if(resource == NULL)
            continue;
        int permissions = secResource->getAccessFlags();
        // -1 means the access flags were never resolved - nothing worth caching.
        if(permissions == -1)
            continue;
        MapResAccess::iterator it = m_resAccessMap.find(SecCacheKeyEntry(resource, resourcetype));
        if (it != m_resAccessMap.end())//already exists so overwrite it but first remove existing timestamp info
        {
            ResPermCacheEntry& resParamCacheEntry = (*it).second;
            time_t oldtstamp = resParamCacheEntry.first;
            //there may be multiple resources associated with the same timestamp
            //in the multimap so find this entry
            //
            MapTimeStamp::iterator itL = m_timestampMap.lower_bound( oldtstamp );
            MapTimeStamp::iterator itU = m_timestampMap.upper_bound( oldtstamp );
            MapTimeStamp::iterator its;
            // Scan only the equal-timestamp range for the matching (name, type) key;
            // erase exactly that one entry and stop (erasing invalidates 'its').
            for ( its = itL; its != itU; its++)
            {
                SecCacheKeyEntry& cachekey = (*its).second;
                if (cachekey.first == resource && cachekey.second == resourcetype)
                {
                    m_timestampMap.erase(its);
                    break;
                }
            }
            m_resAccessMap.erase(SecCacheKeyEntry(resource, resourcetype));
        }
#ifdef _DEBUG
        DBGLOG("CACHE: CResPermissionsCache Adding %s:%s(%d)", m_user.c_str(), resource, permissions);
#endif
        // Store a clone so the cache owns its copy independently of the caller's object.
        m_resAccessMap.insert( pair<SecCacheKeyEntry, ResPermCacheEntry>(SecCacheKeyEntry(resource, resourcetype), ResPermCacheEntry(tstamp, secResource->clone())));
        m_timestampMap.insert( pair<time_t, SecCacheKeyEntry>(tstamp, SecCacheKeyEntry(resource, resourcetype)));
    }
}
void CWSESPControlEx::cleanSessions(bool allSessions, const char* _id, const char* _userID, const char* _fromIP) { StringBuffer searchPath; setSessionXPath(allSessions, _id, _userID, _fromIP, searchPath); Owned<IRemoteConnection> globalLock = querySDSConnectionForESPSession(RTM_LOCK_WRITE, SESSION_SDS_LOCK_TIMEOUT); Owned<IPropertyTreeIterator> iter = globalLock->queryRoot()->getElements("*"); ForEach(*iter) { IArrayOf<IPropertyTree> toRemove; Owned<IPropertyTreeIterator> iter1 = iter->query().getElements(searchPath.str()); ForEach(*iter1) toRemove.append(*LINK(&iter1->query())); ForEachItemIn(i, toRemove) iter->query().removeTree(&toRemove.item(i)); } }
bool CPermissionsCache::addManagedFileScopes(IArrayOf<ISecResource>& scopes) { WriteLockBlock writeLock(m_scopesRWLock); ForEachItemIn(x, scopes) { ISecResource* scope = &scopes.item(x); if(!scope) continue; const char* cachekey = scope->getName(); if(cachekey == NULL) continue; map<string, ISecResource*>::iterator it = m_managedFileScopesMap.find(cachekey); if (it != m_managedFileScopesMap.end()) { ISecResource *res = (*it).second; res->Release(); m_managedFileScopesMap.erase(it); } #ifdef _DEBUG DBGLOG("Caching Managed File Scope %s",cachekey); #endif m_managedFileScopesMap.insert( pair<string, ISecResource*>(cachekey, LINK(scope))); }
bool CBaseSecurityManager::updateSettings(ISecUser & sec_user,IArrayOf<ISecResource>& rlist) { CSecureUser* user = (CSecureUser*)&sec_user; if(user == NULL) return false; int usernum = findUser(user->getName(),user->getRealm()); if(usernum < 0) { PrintLog("User number of %s can't be found", user->getName()); return false; } bool sqchecked = false, sqverified = false, otpchecked = false; int otpok = -1; ForEachItemIn(x, rlist) { ISecResource* secRes = (ISecResource*)(&(rlist.item(x))); if(secRes == NULL) continue; //AccessFlags default value is -1. Set it to 0 so that the settings can be cached. AccessFlags is not being used for settings. secRes->setAccessFlags(0); if(secRes->getParameter("userprop") && *secRes->getParameter("userprop")!='\0') { //if we have a parameter in the user or company table it will have been added as a parameter to the ISecUser when // the authentication query was run. We should keep this messiness here so that the the end user is insulated.... dbValidateSetting(*secRes,sec_user); continue; } const char* resource_name = secRes->getParameter("resource"); if(resource_name && *resource_name && (stricmp(resource_name, "SSN Masking") == 0 || stricmp(resource_name, "Driver License Masking") == 0)) { //If OTP Enabled and OTP2FACTOR cookie not valid, mask if(m_enableOTP) { if(!otpchecked) { const char* otpcookie = sec_user.getProperty("OTP2FACTOR"); // -1 means OTP is not enabled for the user. 0: failed verfication, 1: passed verification. 
otpok = validateOTP(&sec_user, otpcookie); otpchecked = true; } if(otpok == 0) { CSecurityResource* cres = dynamic_cast<CSecurityResource*>(secRes); if(resource_name && *resource_name && cres) { if(stricmp(resource_name, "SSN Masking") == 0) { cres->setValue("All"); continue; } else if(stricmp(resource_name, "Driver License Masking") == 0) { cres->setValue("1"); continue; } } } else if(otpok == 1) { CSecurityResource* cres = dynamic_cast<CSecurityResource*>(secRes); if(resource_name && *resource_name && cres) { if(stricmp(resource_name, "SSN Masking") == 0) { cres->setValue("None"); continue; } else if(stricmp(resource_name, "Driver License Masking") == 0) { cres->setValue("0"); continue; } } } } if(m_enableIPRoaming && sec_user.getPropertyInt("IPRoaming") == 1) { if(!sqchecked) { const char* sequest = sec_user.getProperty("SEQUEST"); if(sequest && *sequest) { sqverified = validateSecurityQuestion(&sec_user, sequest); } sqchecked = true; } if(!sqverified) { CSecurityResource* cres = dynamic_cast<CSecurityResource*>(secRes); if(resource_name && *resource_name && cres) { if(stricmp(resource_name, "SSN Masking") == 0) { cres->setValue("All"); continue; } else if(stricmp(resource_name, "Driver License Masking") == 0) { cres->setValue("1"); continue; } } } } } dbValidateSetting(*secRes,usernum,user->getRealm()); }
// Returns a pointer to the output dataset at the given zero-based position.
// No bounds checking beyond whatever the underlying array itself performs.
CTable * queryOutputDataset(aindex_t idx)
{
    CTable &dataset = m_outputDatasets.item(idx);
    return &dataset;
}
// Looks up cached permissions for each resource in 'resources'. For every
// resource, one bool is written into the caller-supplied pFound array (which
// must hold at least resources.ordinality() entries) indicating a cache hit;
// on a hit the cached permission data is copied into the resource. Stale
// entries are purged first. Returns the number of hits.
int CResPermissionsCache::lookup( IArrayOf<ISecResource>& resources, bool* pFound )
{
    time_t tstamp;
    time(&tstamp);
    int timeout = m_pParentCache->getCacheTimeout();
    if(timeout == 0 && m_pParentCache->isTransactionalEnabled())
        timeout = 10; //Transactional timeout is set to 10 seconds for long transactions that might take over 10 seconds.
    // From here on 'tstamp' is the staleness cutoff, not the current time.
    tstamp -= timeout;
    if (m_tLastCleanup < tstamp)
        removeStaleEntries(tstamp);
    int nresources = resources.ordinality();
    int nFound = 0;
    for (int i = 0; i < nresources; i++)
    {
        ISecResource& secResource = resources.item(i);
        const char* resource = secResource.getName();
        if(resource == NULL)
        {
            *pFound++ = false; // unnamed resource: record a miss and advance the output slot
            continue;
        }
#ifdef _DEBUG
        DBGLOG("CACHE: CResPermissionsCache Looking up resource(%d of %d) %s:%s", i, nresources, m_user.c_str(), resource);
#endif
        MapResAccess::iterator it = m_resAccessMap.find(SecCacheKeyEntry(resource, secResource.getResourceType()));
        if (it != m_resAccessMap.end())//exists in cache
        {
            ResPermCacheEntry& resParamCacheEntry = (*it).second;
            const time_t timestamp = resParamCacheEntry.first;
            if (timestamp < tstamp)//entry was not stale during last cleanup but is stale now
                *pFound++ = false;
            else if(!m_pParentCache->isCacheEnabled() && m_pParentCache->isTransactionalEnabled())//m_pParentCache->getOriginalTimeout() == 0)
            {
                // Transactional mode: an entry only counts as valid if it was
                // cached after the current thread (transaction) started.
                time_t tctime = getThreadCreateTime();
                if(tctime <= 0 || timestamp < tctime)
                {
                    *pFound++ = false;
                }
                else
                {
                    secResource.copy(resParamCacheEntry.second);
#ifdef _DEBUG
                    DBGLOG("CACHE: CResPermissionsCache FoundA %s:%s=>%d", m_user.c_str(), resource, ((ISecResource*)resParamCacheEntry.second)->getAccessFlags());
#endif
                    *pFound++ = true;
                    nFound++;
                }
            }
            else
            {
                // Normal cached hit: copy the cached permission data out.
                secResource.copy(resParamCacheEntry.second);
#ifdef _DEBUG
                DBGLOG("CACHE: CResPermissionsCache FoundB %s:%s=>%d", m_user.c_str(), resource, ((ISecResource*)resParamCacheEntry.second)->getAccessFlags());
#endif
                *pFound++ = true;
                nFound++;
            }
        }
        else
            *pFound++ = false;
    }
    return nFound;
}
// Returns the column at the given zero-based index, cast to the concrete CColumn type.
// NOTE(review): the C-style cast presumes m_columns stores CColumn instances - confirm the member's declared element type.
CColumn * queryColumn(aindex_t colNum) { return (CColumn *)&m_columns.item(colNum); }//zero based column indices
__int64 findPositionInRoxieQueryList(int type, const char *value, bool descend, IArrayOf<IEspRoxieQuery>& queries) { if (!value || (strlen(value) < 1)) { if (descend) return -1; else return 0; } __int64 addToPos = -1; ForEachItemIn(i, queries) { IEspRoxieQuery& query = queries.item(i); char *Value = NULL; switch (type) { case ROXIEQUERYID: Value = (char *) query.getID(); break; case ROXIEQUERYDEPLOYEDBY: Value = (char *) query.getDeployedBy(); break; case ROXIEQUERYUPDATEDBY: Value = (char *) query.getUpdatedBy(); break; case ROXIEQUERYWUID: Value = (char *) query.getWUID(); break; case ROXIEQUERYSUSPENDED: Value = (char *) query.getSuspended(); break; case ROXIEQUERYHIGHPRIORITY: Value = (char *) query.getHighPriority(); break; case ROXIEQUERYERROR: Value = (char *) query.getError(); break; case ROXIEQUERYCOMMENT: Value = (char *) query.getComment(); break; case ROXIEQUERYHASALIASES: Value = (char *) query.getHasAliases(); break; } if (!Value) continue; if (type != ROXIEQUERYID) { if (descend && strcmp(value, Value)>0) { addToPos = i; break; } if (!descend && strcmp(value, Value)<0) { addToPos = i; break; } } else { if (descend && stricmp(value, Value)>0) { addToPos = i; break; } if (!descend && stricmp(value, Value)<0) { addToPos = i; break; } } }
ForEachItemInRev(i,servers) { ISashaServer &server=servers.item(i); LOG(MCprogress, unknownJob, "Stopping %d",i); server.stop(); }
// Returns the input column at the given zero-based index, cast to the concrete CColumn type.
// NOTE(review): the C-style cast presumes m_inputs stores CColumn instances - confirm the member's declared element type.
CColumn * queryInput(aindex_t idx) { return (CColumn*)&m_inputs.item(idx); }//zero based index