Example #1
static bool deleteEmptyDir(IFile *dir)
{
    // this is a bit odd - we already know there are no files, but there may be empty sub-dirs
    Owned<IDirectoryIterator> iter = dir->directoryFiles(NULL,false,true);
    IArrayOf<IFile> subdirs;
    bool candelete = true;
    ForEach(*iter) {
        if (iter->isDir()) 
            subdirs.append(iter->get());
        else
            candelete = false;
    }
    if (!candelete)
        return false;
    try {
        ForEachItemIn(i,subdirs) {
            if (!deleteEmptyDir(&subdirs.item(i)))
                candelete = false;
        }
    }
    catch (IException *e) {
        EXCLOG(e,"deleteEmptyDir");
        candelete = false;
    }
    if (!candelete)
        return false;
    static CriticalSection sect;
    CriticalBlock block(sect);      // don't want to actually remove in parallel
    dir->remove();
    return !dir->exists();
}
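The snippets in this collection all revolve around the same owning-array idiom: IArrayOf<T>::append() takes ownership of one link, item() hands back a borrowed reference, and the array releases every element when it is killed or destroyed. A minimal, self-contained sketch of that idiom follows; the jlib header paths and the IWidget/CWidget types are illustrative assumptions, not part of Example #1.

// Minimal sketch of the IArrayOf ownership idiom (header locations assumed;
// IWidget/CWidget are hypothetical types used only for illustration).
#include "jiface.hpp"   // IInterface, CInterface, IMPLEMENT_IINTERFACE (assumed)
#include "jarray.hpp"   // IArrayOf, ForEachItemIn (assumed)

interface IWidget : extends IInterface
{
    virtual unsigned value() const = 0;
};

class CWidget : public CInterface, implements IWidget
{
    unsigned val;
public:
    IMPLEMENT_IINTERFACE;
    CWidget(unsigned _val) : val(_val) {}
    virtual unsigned value() const { return val; }
};

static unsigned sumWidgets()
{
    IArrayOf<IWidget> widgets;              // holds one link per element
    for (unsigned i=0; i<5; i++)
        widgets.append(*new CWidget(i));    // append takes ownership of the new link
    unsigned total = 0;
    ForEachItemIn(w, widgets)
        total += widgets.item(w).value();   // item() borrows; no extra Release needed
    return total;                           // array destructor releases every element
}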
Example #2
    virtual int processCMD()
    {
        StringBuffer s;
        Owned<IClientWsWorkunits> client = createWsWorkunitsClient();
        VStringBuffer url("http://%s:%s/WsWorkunits", optServer.sget(), optPort.sget());
        client->addServiceUrl(url.str());
        if (optUsername.length())
            client->setUsernameToken(optUsername.get(), optPassword.sget(), NULL);

        Owned<IClientWUQuerySetAliasActionRequest> req = client->createWUQuerysetAliasActionRequest();
        IArrayOf<IEspQuerySetAliasActionItem> aliases;
        Owned<IEspQuerySetAliasActionItem> item = createQuerySetAliasActionItem();
        item->setName(optAlias.get());
        aliases.append(*item.getClear());
        req->setAliases(aliases);

        req->setAction("Deactivate");
        req->setQuerySetName(optQuerySet.get());

        Owned<IClientWUQuerySetAliasActionResponse> resp = client->WUQuerysetAliasAction(req);
        IArrayOf<IConstQuerySetAliasActionResult> &results = resp->getResults();
        if (resp->getExceptions().ordinality())
            outputMultiExceptions(resp->getExceptions());
        else if (results.empty())
            fprintf(stderr, "\nError Empty Result!\n");
        else
        {
            IConstQuerySetAliasActionResult &item = results.item(0);
            if (item.getSuccess())
                fprintf(stdout, "Deactivated alias %s/%s\n", optQuerySet.sget(), optAlias.sget());
            else if (item.getCode()|| item.getMessage())
                fprintf(stderr, "Error (%d) %s\n", item.getCode(), item.getMessage());
        }
        return 0;
    }
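Note the handoff at aliases.append(*item.getClear()) above: getClear() moves the link out of the Owned wrapper into the array, whereas getLink() (used in several later examples) adds a reference so both sides keep one. A small sketch of the two styles, using IPropertyTree only because it is a convenient linkable type (header locations assumed):

// getClear() vs getLink() when filling an IArrayOf (sketch; headers assumed).
#include "jiface.hpp"   // Owned (assumed location)
#include "jarray.hpp"   // IArrayOf (assumed location)
#include "jptree.hpp"   // IPropertyTree, createPTree (assumed location)

static void handoffStyles()
{
    IArrayOf<IPropertyTree> trees;

    Owned<IPropertyTree> a = createPTree("A");
    trees.append(*a.getClear());   // link moved into the array; 'a' is now empty

    Owned<IPropertyTree> b = createPTree("B");
    trees.append(*b.getLink());    // link shared; both 'b' and the array hold one
}   // 'trees' and 'b' release their links here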
Example #3
    virtual void init()
    {
        OwnedRoxieString fname(helper->getFileName());
        dlfn.set(fname);
        isLocal = 0 != (TIWlocal & helper->getFlags());
        unsigned minSize = helper->queryDiskRecordSize()->getMinRecordSize();
        if (minSize > KEYBUILD_MAXLENGTH)
            throw MakeActivityException(this, 0, "Index minimum record length (%d) exceeds %d internal limit", minSize, KEYBUILD_MAXLENGTH);
        unsigned maxSize = helper->queryDiskRecordSize()->getRecordSize(NULL);
        if (maxSize > KEYBUILD_MAXLENGTH)
            throw MakeActivityException(this, 0, "Index maximum record length (%d) exceeds %d internal limit. Minimum size = %d, try setting index MAXLENGTH", maxSize, KEYBUILD_MAXLENGTH, minSize);

        singlePartKey = 0 != (helper->getFlags() & TIWsmall) || dlfn.isExternal();
        clusters.kill();
        unsigned idx=0;
        while (true)
        {
            OwnedRoxieString cluster(helper->getCluster(idx));
            if(!cluster)
                break;
            clusters.append(cluster);
            idx++;
        }
        IArrayOf<IGroup> groups;
        if (singlePartKey)
        {
            isLocal = true;
            buildTlk = false;
        }
        else if (!isLocal || globals->getPropBool("@buildLocalTlks", true))
            buildTlk = true;

        fillClusterArray(container.queryJob(), fname, clusters, groups);
        unsigned restrictedWidth = 0;
        if (TIWhaswidth & helper->getFlags())
        {
            restrictedWidth = helper->getWidth();
            if (restrictedWidth > container.queryJob().querySlaves())
                throw MakeActivityException(this, 0, "Unsupported, can't refactor to width(%d) larger than host cluster(%d)", restrictedWidth, container.queryJob().querySlaves());
            else if (restrictedWidth < container.queryJob().querySlaves())
            {
                if (!isLocal)
                    throw MakeActivityException(this, 0, "Unsupported, refactoring to few parts only supported for local indexes.");
                assertex(!singlePartKey);
                unsigned gwidth = groups.item(0).ordinality();
                if (0 != container.queryJob().querySlaves() % gwidth)
                    throw MakeActivityException(this, 0, "Unsupported, refactored target size (%d) must be factor of thor cluster width (%d)", groups.item(0).ordinality(), container.queryJob().querySlaves());
                if (0 == restrictedWidth)
                    restrictedWidth = gwidth;
                ForEachItemIn(g, groups)
                {
                    IGroup &group = groups.item(g);
                    if (gwidth != groups.item(g).ordinality())
                        throw MakeActivityException(this, 0, "Unsupported, cannot output multiple refactored widths, targeting cluster '%s' and '%s'", clusters.item(0), clusters.item(g));
                    if (gwidth != restrictedWidth)
                        groups.replace(*group.subset((unsigned)0, restrictedWidth), g);
                }
                refactor = true;
            }
Example #4
void CWriteMasterBase::init()
{
    published = false;
    recordsProcessed = 0;
    dlfn.set(diskHelperBase->getFileName());
    if (diskHelperBase->getFlags() & TDWextend)
    {
        assertex(0 == (diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp)));
        Owned<IDistributedFile> file = queryThorFileManager().lookup(container.queryJob(), diskHelperBase->getFileName(), false, true);
        if (file.get())
        {
            fileDesc.setown(file->getFileDescriptor());
            queryThorFileManager().noteFileRead(container.queryJob(), file, true);
        }
    }
    if (dlfn.isExternal())
        mpTag = container.queryJob().allocateMPTag(); // used 
    if (NULL == fileDesc.get())
    {
        bool overwriteok = 0!=(TDWoverwrite & diskHelperBase->getFlags());
        
        unsigned idx=0;
        while (diskHelperBase->queryCluster(idx))
            clusters.append(diskHelperBase->queryCluster(idx++));
        IArrayOf<IGroup> groups;
        fillClusterArray(container.queryJob(), diskHelperBase->getFileName(), clusters, groups);
        fileDesc.setown(queryThorFileManager().create(container.queryJob(), diskHelperBase->getFileName(), clusters, groups, overwriteok, diskHelperBase->getFlags()));
        if (1 == groups.ordinality())
            targetOffset = getGroupOffset(groups.item(0), container.queryJob().querySlaveGroup());
        IPropertyTree &props = fileDesc->queryProperties();
        if (diskHelperBase->getFlags() & (TDWowned|TDXjobtemp|TDXtemporary))
            props.setPropBool("@owned", true);
        if (diskHelperBase->getFlags() & TDWresult)
            props.setPropBool("@result", true);
        const char *rececl= diskHelperBase->queryRecordECL();
        if (rececl&&*rececl)
            props.setProp("ECL", rececl);
        bool blockCompressed=false;
        void *ekey;
        size32_t ekeylen;
        diskHelperBase->getEncryptKey(ekeylen,ekey);
        if (ekeylen)
        {
            memset(ekey,0,ekeylen);
            free(ekey);
            props.setPropBool("@encrypted", true);      
            blockCompressed = true;
        }
        else if (0 != (diskHelperBase->getFlags() & TDWnewcompress) || 0 != (diskHelperBase->getFlags() & TDXcompress))
            blockCompressed = true;
        if (blockCompressed)
            props.setPropBool("@blockCompressed", true);
        if (TAKdiskwrite == container.getKind() && (0 != (diskHelperBase->getFlags() & TDXtemporary)) && container.queryOwner().queryOwner() && (!container.queryOwner().isGlobal())) // I am in a child query
        { // do early, because this will be local act. and will not come back to master until end of owning graph.
            publish();
        }
    }
}
Example #5
bool CWsDfuXRefEx::onDFUXRefList(IEspContext &context, IEspDFUXRefListRequest &req, IEspDFUXRefListResponse &resp)
{
    try
    {
        if (!context.validateFeatureAccess(FEATURE_URL, SecAccess_Read, false))
            throw MakeStringException(ECLWATCH_DFU_XREF_ACCESS_DENIED, "Failed to access Xref. Permission denied.");

        StringBuffer username;
        context.getUserID(username);
        DBGLOG("CWsDfuXRefEx::onDFUXRefList User=%s",username.str());


        // First we need to get a list of the available Thor clusters...
        IArrayOf<IEspTpCluster> clusters;
        CTpWrapper _topology;
        _topology.getClusterProcessList(eqThorCluster,clusters,false,true);
        ///_topology.getClusterList(eqRoxieCluster,clusters,false,true);

        Owned<IPropertyTree> pXRefNodeTree = createPTree("XRefNodes");
        //DBGLOG("CWsDfuXRefEx::onDFUXRefList1\n");

        for (unsigned x=0;x<=clusters.ordinality();x++)
        {
            IPropertyTree* XRefTreeNode = pXRefNodeTree->addPropTree("XRefNode", createPTree(ipt_caseInsensitive));
            
            IEspTpCluster* cluster = x<clusters.ordinality()?&clusters.item(x):NULL;        
            const char *clustername = cluster?cluster->getName():"SuperFiles";

            XRefTreeNode->setProp("Name",clustername);
            // create the node if it doesn't exist
            Owned<IXRefNode> xRefNode = XRefNodeManager->getXRefNode(clustername);
            if (xRefNode == 0)
            {
                XRefTreeNode->setProp("Modified","");
                XRefTreeNode->setProp("Status","Not Run");
            }
            else
            {
                StringBuffer buf;
                XRefTreeNode->setProp("Modified",xRefNode->getLastModified(buf).str());
                buf.clear();
                XRefTreeNode->setProp("Status",xRefNode->getStatus(buf).str());
            }
        }
        

        StringBuffer buf;
        resp.setDFUXRefListResult(toXML(pXRefNodeTree, buf).str());
    }
    catch(IException* e)
    {   
        FORWARDEXCEPTION(context, e,  ECLWATCH_INTERNAL_ERROR);
    }
    return true;
}
Example #6
 aindex_t getAllCachedTables(IArrayOf<CTable> &_tables)
 {
     CriticalBlock b(m_crit);
     HashIterator iter(m_tableCache);
     ForEach(iter)
     {
         CTable *tbl = m_tableCache.mapToValue(&iter.query());
         _tables.append(*(LINK(tbl)));
     }
     return _tables.ordinality();
 }
Example #7
 void getGroupByString(char delimiter, StringBuffer & str)
 {
     int groupbycount = groupbyList.length();
     for (int i = 0; i < groupbycount; i++)
     {
         ISQLExpression * ordercol =  &groupbyList.item(i);
         SQLFieldValueExpression * colexp = dynamic_cast<SQLFieldValueExpression *>(ordercol);
         str.append(colexp->getName());
         if (i != groupbycount - 1)
             str.append(delimiter);
     }
 }
Example #8
 void getOrderByString(char delimiter, StringBuffer & str)
 {
     int orderbycount = orderbyList.length();
     for (int i = 0; i < orderbycount; i++)
     {
         ISQLExpression* ordercol = &orderbyList.item(i);
         SQLFieldValueExpression* colexp = dynamic_cast<SQLFieldValueExpression*>(ordercol);
         str.append(colexp->isAscending() ? "" : "-");
         str.append(colexp->getNameOrAlias());
         if (i != orderbycount - 1)
             str.append(delimiter);
     }
 }
Example #9
void CResPermissionsCache::add( IArrayOf<ISecResource>& resources )
{
    time_t tstamp;
    time(&tstamp);

    int nresources = resources.ordinality();
    for (int i = 0; i < nresources; i++)
    {
        ISecResource* secResource = &resources.item(i);
        if(!secResource)
            continue;
        const char* resource = secResource->getName();
        SecResourceType resourcetype = secResource->getResourceType();
        if(resource == NULL)
            continue;
        int permissions = secResource->getAccessFlags();
        if(permissions == -1)
            continue;

        MapResAccess::iterator it = m_resAccessMap.find(SecCacheKeyEntry(resource, resourcetype));
        if (it != m_resAccessMap.end())//already exists so overwrite it but first remove existing timestamp info
        {
            ResPermCacheEntry& resParamCacheEntry = (*it).second;
            time_t oldtstamp = resParamCacheEntry.first;


            //there may be multiple resources associated with the same timestamp 
            //in the multimap so find this entry
            //
            MapTimeStamp::iterator itL = m_timestampMap.lower_bound( oldtstamp );
            MapTimeStamp::iterator itU = m_timestampMap.upper_bound( oldtstamp );
            MapTimeStamp::iterator its;
            for ( its = itL; its != itU; its++)
            {
                SecCacheKeyEntry& cachekey = (*its).second;
                if (cachekey.first == resource && cachekey.second == resourcetype)
                {
                    m_timestampMap.erase(its);
                    break;
                }
            }
            m_resAccessMap.erase(SecCacheKeyEntry(resource, resourcetype));
        }
#ifdef _DEBUG
        DBGLOG("CACHE: CResPermissionsCache Adding %s:%s(%d)", m_user.c_str(), resource, permissions);
#endif
        m_resAccessMap.insert( pair<SecCacheKeyEntry, ResPermCacheEntry>(SecCacheKeyEntry(resource, resourcetype),  ResPermCacheEntry(tstamp, secResource->clone())));
        m_timestampMap.insert( pair<time_t, SecCacheKeyEntry>(tstamp, SecCacheKeyEntry(resource, resourcetype)));
    }
}
Example #10
void closedownClientProcess()
{
    if (!daliClientIsActive)
        return;
    while (shutdownHooks.ordinality())
    {
        Owned<IDaliClientShutdown> c = &shutdownHooks.popGet();
        c->clientShutdown();
    }
    clearPagedElementsCache(); // has connections
    closeSDS();
    closeSubscriptionManager();
    stopClientProcess();
    closeCoven();
    stopMPServer();
    daliClientIsActive = false;
}
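The while/popGet() loop in closedownClientProcess above is the usual way to drain an owning array: popGet() removes the last element and hands its link to the caller, so wrapping the result in Owned releases it once the hook has run. A condensed sketch of just that pattern (headers assumed as in the earlier sketches):

// Drain-and-release pattern (sketch; headers assumed).
#include "jiface.hpp"
#include "jarray.hpp"

static void drainHooks(IArrayOf<IInterface> &hooks)
{
    while (hooks.ordinality())
    {
        Owned<IInterface> hook = &hooks.popGet();  // array gives up its link...
        // ... run the hook here; Owned releases the link at end of scope
    }
}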
Example #11
int CFileSpraySoapBindingEx::onFinishUpload(IEspContext &ctx, CHttpRequest* request, CHttpResponse* response,   const char *service, const char *method, StringArray& fileNames, StringArray& files, IMultiException *me)
{
    if (!me || (me->ordinality()==0))
    {
        if (ctx.getResponseFormat()==ESPSerializationANY)
        {
            StringBuffer newUrl, netAddress, path;
            request->getParameter("NetAddress", netAddress);
            request->getParameter("Path", path);
            newUrl.appendf("/FileSpray/DropZoneFiles?NetAddress=%s&Path=%s", netAddress.str(), path.str());
            response->redirect(*request, newUrl.str());
        }
        else
        {
            IArrayOf<IEspDFUActionResult> results;
            Owned<CUploadFilesResponse> esp_response = new CUploadFilesResponse("FileSpray");
            ForEachItemIn(i, fileNames)
            {
                const char* fileName = fileNames.item(i);
                Owned<IEspDFUActionResult> res = createDFUActionResult("", "");
                res->setID(fileName);
                res->setAction("Upload File");
                res->setResult("Success");
                results.append(*res.getLink());
            }
            if (!results.length())
            {
                Owned<IEspDFUActionResult> res = createDFUActionResult("", "");
                res->setID("<N/A>");
                res->setAction("Upload File");
                res->setResult("No file uploaded");
                results.append(*res.getLink());
            }
            esp_response->setUploadFileResults(results);

            MemoryBuffer content;
            StringBuffer mimetype;
            esp_response->appendContent(&ctx,content, mimetype);
            response->setContent(content.length(), content.toByteArray());
            response->setContentType(mimetype.str());
            response->send();
        }
    }
    else
    {
Example #12
void CWSESPControlEx::cleanSessions(bool allSessions, const char* _id, const char* _userID, const char* _fromIP)
{
    StringBuffer searchPath;
    setSessionXPath(allSessions, _id, _userID, _fromIP, searchPath);

    Owned<IRemoteConnection> globalLock = querySDSConnectionForESPSession(RTM_LOCK_WRITE, SESSION_SDS_LOCK_TIMEOUT);
    Owned<IPropertyTreeIterator> iter = globalLock->queryRoot()->getElements("*");
    ForEach(*iter)
    {
        IArrayOf<IPropertyTree> toRemove;
        Owned<IPropertyTreeIterator> iter1 = iter->query().getElements(searchPath.str());
        ForEach(*iter1)
            toRemove.append(*LINK(&iter1->query()));

        ForEachItemIn(i, toRemove)
            iter->query().removeTree(&toRemove.item(i));
    }
}
Example #13
void CSlavePartMapping::getParts(unsigned i, IArrayOf<IPartDescriptor> &parts)
{
    if (local)
        i = 0;
    if (i>=maps.ordinality()) return;

    CSlaveMap &map = maps.item(i);
    ForEachItemIn(m, map)
        parts.append(*LINK(&map.item(m)));
}
Example #14
void getPackageListInfo(IPropertyTree *mapTree, IEspPackageListMapData *pkgList)
{
    pkgList->setId(mapTree->queryProp("@id"));
    pkgList->setTarget(mapTree->queryProp("@querySet"));

    Owned<IPropertyTreeIterator> iter = mapTree->getElements("Package");
    IArrayOf<IConstPackageListData> results;
    ForEach(*iter)
    {
        IPropertyTree &item = iter->query();

        Owned<IEspPackageListData> res = createPackageListData("", "");
        res->setId(item.queryProp("@id"));
        if (item.hasProp("@queries"))
            res->setQueries(item.queryProp("@queries"));
        results.append(*res.getClear());
    }
    pkgList->setPkgListData(results);
}
Example #15
static void stopServer()
{
    CriticalBlock b(*stopServerCrit); // NB: will not protect against abort handler, which will interrupt thread and be on same TID.
    if (serverStopped) return;
    serverStopped = true;
    ForEachItemInRev(h,servers)
    {
        IDaliServer &server=servers.item(h);
        LOG(MCprogress, unknownJob, "Suspending %d",h);
        server.suspend();
    }
Example #16
bool reinitClientProcess(IGroup *servergrp, DaliClientRole role, const char *clientVersion, const char *minServerVersion, unsigned timeout)
{
    if (!daliClientIsActive)
        return false;
    while (shutdownHooks.ordinality())
    {
        Owned<IDaliClientShutdown> c = &shutdownHooks.popGet();
        c->clientShutdown();
    }
    stopClientProcess();
    closeSDS();
    closeSubscriptionManager();
    closeCoven();
    Owned<ICommunicator> comm(createCommunicator(servergrp,true));
    IGroup * covengrp;
    if (!registerClientProcess(comm.get(),covengrp,timeout,role))   // should be the same as before TBD
        return false;
    initCoven(covengrp,NULL,clientVersion,minServerVersion);
    covengrp->Release();
    return true;
}
Example #17
bool CWSESPControlEx::onSessionQuery(IEspContext& context, IEspSessionQueryRequest& req, IEspSessionQueryResponse& resp)
{
    try
    {
#ifdef _USE_OPENLDAP
        CLdapSecManager* secmgr = dynamic_cast<CLdapSecManager*>(context.querySecManager());
        if(secmgr && !secmgr->isSuperUser(context.queryUser()))
        {
            context.setAuthStatus(AUTH_STATUS_NOACCESS);
            throw MakeStringException(ECLWATCH_SUPER_USER_ACCESS_DENIED, "Failed to query session. Permission denied.");
        }
#endif

        StringBuffer xpath;
        setSessionXPath(false, nullptr, req.getUserID(), req.getFromIP(), xpath);

        IArrayOf<IEspSession> sessions;
        Owned<IRemoteConnection> globalLock = querySDSConnectionForESPSession(RTM_LOCK_READ, SESSION_SDS_LOCK_TIMEOUT);
        Owned<IPropertyTreeIterator> iter = globalLock->queryRoot()->getElements("*");
        ForEach(*iter)
        {
            IPropertyTree& appSessionTree = iter->query();
            unsigned port = appSessionTree.getPropInt("@port");
            Owned<IPropertyTreeIterator> iter1 = appSessionTree.getElements(xpath.str());
            ForEach(*iter1)
            {
                IPropertyTree& sessionTree = iter1->query();
                Owned<IEspSession> s = createSession();
                setSessionInfo(&sessionTree, port, s);
                sessions.append(*s.getLink());
            }
        }
        resp.setSessions(sessions);
    }
    catch(IException* e)
    {
        FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR);
    }
    return true;
}
Example #18
 aindex_t getMatchingTables(const char * tblFilter, IArrayOf<CTable> &_tables, bool bExact)
 {
     CriticalBlock b(m_crit);
     HashIterator iter(m_tableCache);
     if (bExact)
     {
         CTable *tbl = m_tableCache.getValue(tblFilter);
         if (tbl)
             _tables.append(*(LINK(tbl)));
     }
     else
     {
         unsigned filterLen = strlen(tblFilter);
         ForEach(iter)
         {
             CTable *tbl = m_tableCache.mapToValue(&iter.query());
             if (0 == strnicmp(tblFilter, tbl->queryName(), filterLen))
                 _tables.append(*(LINK(tbl)));
         }
     }
     return _tables.ordinality();
 }
Example #19
void AddServers(const char *auditdir)
{
    // order significant
    servers.append(*createDaliSessionServer());
    servers.append(*createDaliPublisherServer());
    servers.append(*createDaliSDSServer(serverConfig));
    servers.append(*createDaliNamedQueueServer());
    servers.append(*createDaliDFSServer(serverConfig));
    servers.append(*createDaliAuditServer(auditdir));
    servers.append(*createDaliDiagnosticsServer());
    // add new coven servers here
}
Example #20
 //Outputs
 void addOutputDatasetColumn(const char * datasetName, CColumn * _output)
 {
     if (0 == m_outputDatasets.ordinality())
         m_outputDatasets.append(*(new CTable(datasetName,NULL,NULL)));//add new output dataset
     else
     {
         CTable & dataset = m_outputDatasets.item(m_outputDatasets.ordinality() - 1);
         if (0 != strcmp(datasetName, dataset.queryName()))
             m_outputDatasets.append(*(new CTable(datasetName,NULL,NULL)));//add new output dataset
     }
     CTable & dataset = m_outputDatasets.item(m_outputDatasets.ordinality() - 1);
     dataset.addColumn(_output);
 }
Example #21
static void AddServers()
{
    // order significant
    servers.append(*createSashaArchiverServer());
//  servers.append(*createSashaVerifierServer());
    servers.append(*createSashaSDSCoalescingServer());
    servers.append(*createSashaXrefServer());
    servers.append(*createSashaDaFSMonitorServer());
    servers.append(*createSashaQMonitorServer());
    servers.append(*createSashaFileExpiryServer()); 
    // add new servers here
}
Example #22
int CPermissionsCache::lookup( ISecUser& sec_user, IArrayOf<ISecResource>& resources, 
                            bool* pFound)
{
    synchronized block(m_cachemonitor);
    const char* userId = sec_user.getName();
    int nFound;
    MapResPermissionsCache::const_iterator i = m_resPermissionsMap.find( userId ); 

    if (i != m_resPermissionsMap.end())
    {
        CResPermissionsCache* pResPermissionsCache = (*i).second;
        nFound = pResPermissionsCache->lookup( resources, pFound );
    }
    else
    {
        nFound = 0;
        memset(pFound, 0, sizeof(bool)*resources.ordinality());
        //DBGLOG("CACHE: Looking up %s:*", userId);
    }

    return nFound;
}
Example #23
bool CPermissionsCache::addManagedFileScopes(IArrayOf<ISecResource>& scopes)
{
    WriteLockBlock writeLock(m_scopesRWLock);
    ForEachItemIn(x, scopes)
    {
        ISecResource* scope = &scopes.item(x);
        if(!scope)
            continue;
        const char* cachekey = scope->getName();
        if(cachekey == NULL)
            continue;
        map<string, ISecResource*>::iterator it = m_managedFileScopesMap.find(cachekey);
        if (it != m_managedFileScopesMap.end())
        {
            ISecResource *res = (*it).second;
            res->Release();
            m_managedFileScopesMap.erase(it);
        }
#ifdef _DEBUG
        DBGLOG("Caching Managed File Scope %s",cachekey);
#endif
        m_managedFileScopesMap.insert( pair<string, ISecResource*>(cachekey, LINK(scope)));
    }
Example #24
int CPermissionsCache::lookup( ISecUser& sec_user, IArrayOf<ISecResource>& resources, 
                            bool* pFound)
{
    const char* userId = sec_user.getName();
    int nFound;
    ReadLockBlock readLock(m_resPermCacheRWLock);
    MapResPermissionsCache::const_iterator i = m_resPermissionsMap.find( userId ); 
    if (i != m_resPermissionsMap.end())
    {
        CResPermissionsCache* pResPermissionsCache = (*i).second;
        nFound = pResPermissionsCache->lookup( resources, pFound );
    }
    else
    {
        nFound = 0;
        memset(pFound, 0, sizeof(bool)*resources.ordinality());
    }

#ifdef _DEBUG
    DBGLOG("CACHE: CPermissionsCache Looked up resources for %s:*, found %d of %d matches", userId, nFound, resources.ordinality());
#endif
    return nFound;
}
Example #25
    virtual void process() override
    {
        ActPrintLog("INDEXWRITE: Start");
        init();

        IRowStream *stream = inputStream;
        ThorDataLinkMetaInfo info;
        input->getMetaInfo(info);
        outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
        start();
        if (refactor)
        {
            assertex(isLocal);
            if (active)
            {
                unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
                assertex(0 == container.queryJob().querySlaves() % targetWidth);
                unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
                unsigned myPart = queryJobChannel().queryMyRank();

                IArrayOf<IRowStream> streams;
                streams.append(*LINK(stream));
                --partsPerNode;

                // Should this be merging 1,11,21,31 etc.
                unsigned p=0;
                unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
                for (; p<partsPerNode; p++)
                {
                    streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
                }
                ICompare *icompare = helper->queryCompare();
                assertex(icompare);
                Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
                myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
                stream = myInputStream;
            }
            else // serve nodes, creating merged parts
                rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
        }
        processed = THORDATALINK_STARTED;

        // single part key support
        // has to serially pull all data from nodes 2-N
        // nodes 2-N could/should start pushing some data (as it's supposed to be small) to cut down on the serial nature.
        unsigned node = queryJobChannel().queryMyRank();
        if (singlePartKey)
        {
            if (1 == node)
            {
                try
                {
                    open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                    loop
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        if (abortSoon) return;
                        processRow(row);
                    }

                    unsigned node = 2;
                    while (node <= container.queryJob().querySlaves())
                    {
                        Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                        CMessageBuffer mb;
                        Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                        CThorStreamDeserializerSource rowSource;
                        rowSource.setStream(stream);
                        bool successSR;
                        loop
                        {
                            {
                                BooleanOnOff tf(receivingTag2);
                                successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                            }
                            if (successSR)
                            {
                                if (rowSource.eos())
                                    break;
                                Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                                do
                                {
                                    RtlDynamicRowBuilder rowBuilder(allocator);
                                    size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                    OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                    processRow(fRow);
                                }
                                while (!rowSource.eos());
                            }
                        }
                        node++;
                    }
                }
                catch (CATCHALL)
                {
                    close(*partDesc, partCrc, true);
                    throw;
                }
                close(*partDesc, partCrc, true);
                doStopInput();
            }
            else
            {
                CMessageBuffer mb;
                CMemoryRowSerializer mbs(mb);
                Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
                loop
                {
                    BooleanOnOff tf(receivingTag2);
                    if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                    {
                        if (abortSoon) break;
                        mb.clear();
                        do
                        {
                            OwnedConstThorRow row = inputStream->ungroupedNextRow();
                            if (!row) break;
                            serializer->serialize(mbs, (const byte *)row.get());
                        } while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                        if (!queryJobChannel().queryJobComm().reply(mb))
                            throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                        if (0 == mb.length())
                            break;
                    }
                }
            }
        }
Example #26
int CResPermissionsCache::lookup( IArrayOf<ISecResource>& resources, bool* pFound )
{
    time_t tstamp;
    time(&tstamp);

    int timeout = m_pParentCache->getCacheTimeout();
    if(timeout == 0 && m_pParentCache->isTransactionalEnabled())
        timeout = 10; //Transactional timeout is set to 10 seconds for long transactions that might take over 10 seconds.
    tstamp -= timeout;
    if (m_tLastCleanup < tstamp)
        removeStaleEntries(tstamp);

    int nresources = resources.ordinality();
    int nFound = 0;

    for (int i = 0; i < nresources; i++)
    {
        ISecResource& secResource = resources.item(i);
        const char* resource = secResource.getName();
        if(resource == NULL)
        {
            *pFound++ = false;
            continue;
        }
#ifdef _DEBUG
        DBGLOG("CACHE: CResPermissionsCache Looking up resource(%d of %d) %s:%s", i, nresources, m_user.c_str(), resource);
#endif
        MapResAccess::iterator it = m_resAccessMap.find(SecCacheKeyEntry(resource, secResource.getResourceType()));
        if (it != m_resAccessMap.end())//exists in cache
        {
            ResPermCacheEntry& resParamCacheEntry = (*it).second;
            const time_t timestamp = resParamCacheEntry.first;

            if (timestamp < tstamp)//entry was not stale during last cleanup but is stale now
                *pFound++ = false;
            else if(!m_pParentCache->isCacheEnabled() && m_pParentCache->isTransactionalEnabled())//m_pParentCache->getOriginalTimeout() == 0)
            {
                time_t tctime = getThreadCreateTime();
                if(tctime <= 0 || timestamp < tctime)
                {
                    *pFound++ = false;
                }
                else
                {
                    secResource.copy(resParamCacheEntry.second);
#ifdef _DEBUG
                    DBGLOG("CACHE: CResPermissionsCache FoundA %s:%s=>%d", m_user.c_str(), resource, ((ISecResource*)resParamCacheEntry.second)->getAccessFlags());
#endif
                    *pFound++ = true;
                    nFound++;
                }
            }
            else
            {
                secResource.copy(resParamCacheEntry.second);
#ifdef _DEBUG
                DBGLOG("CACHE: CResPermissionsCache FoundB %s:%s=>%d", m_user.c_str(), resource, ((ISecResource*)resParamCacheEntry.second)->getAccessFlags());
#endif
                *pFound++ = true;
                nFound++;
            }
        }
        else
            *pFound++ = false;
    }
    return nFound;
}
Example #27
 ForEachItemInRev(j,servers)
 {
     servers.remove(j);      // ensure correct order for destruction
 }
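Examples #15 and #27 both walk the servers array backwards; a compact sketch of that reverse-order teardown, where remove(idx) drops the array's link to the element (headers assumed as before):

// Reverse-order teardown (sketch; headers assumed).
#include "jiface.hpp"
#include "jarray.hpp"

static void teardown(IArrayOf<IInterface> &servers)
{
    ForEachItemInRev(i, servers)
        servers.remove(i);   // releases the array's link; last-added goes first
}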
Example #28
MODULE_EXIT()
{
    shutdownHooks.kill();
}
Example #29
void removeShutdownHook(IDaliClientShutdown &shutdown)
{
    shutdownHooks.zap(shutdown);
}
Example #30
void addShutdownHook(IDaliClientShutdown &shutdown)
{
    shutdownHooks.append(*LINK(&shutdown));
}
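Examples #28-#30 together form the add/remove/clear lifecycle of a shared hook list: LINK() adds a reference before append so the caller keeps its own link, zap() removes and releases the array's link for a matching element, and kill() releases whatever is left. A compact sketch combining the three (headers assumed; the hook list here is hypothetical):

// add / remove / clear lifecycle for a shared hook list (sketch; headers assumed).
#include "jiface.hpp"
#include "jarray.hpp"

static IArrayOf<IInterface> hooks;

void addHook(IInterface &hook)    { hooks.append(*LINK(&hook)); }  // share: caller keeps its link
void removeHook(IInterface &hook) { hooks.zap(hook); }             // drop the array's link
void clearHooks()                 { hooks.kill(); }                // release all remaining links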