Code example #1
 const void *getFirst() // for global, not called on 1st slave
 {
     CMessageBuffer msg;
     // wait for the message forwarded by the previous slave (rank-1)
     if (!queryJobChannel().queryJobComm().recv(msg, queryJobChannel().queryMyRank()-1, mpTag)) // previous node
         return NULL;
     msg.read(count);              // count value sent by the previous node
     size32_t r = msg.remaining(); // any remaining bytes hold one serialized row
     OwnedConstThorRow firstRow;
     if (r)
         firstRow.deserialize(inrowif, r, msg.readDirect(r));
     return firstRow.getClear();   // hand ownership to the caller; NULL if no row was sent
 }
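
The message consumed by getFirst() has a simple layout: the count, followed by at most one serialized row. For illustration only, here is a minimal sketch of what the sending side on the previous slave could look like, using the same CMessageBuffer and queryJobComm() calls seen above; the function name sendToNext, the rowcount_t type for count, and the rowBuf parameter (assumed to already contain a serialized row, possibly empty) are hypothetical and not taken from the source.

 // Hypothetical sketch (not from the source): forward the running count and an
 // already-serialized row to the next slave, matching the layout getFirst() reads.
 void sendToNext(rowcount_t count, MemoryBuffer &rowBuf)
 {
     CMessageBuffer msg;
     msg.append(count);                                     // read back via msg.read(count)
     if (rowBuf.length())
         msg.append(rowBuf.length(), rowBuf.toByteArray()); // read back via msg.readDirect(r)
     // getFirst() receives from rank-1, so the sender targets rank+1
     queryJobChannel().queryJobComm().send(msg, queryJobChannel().queryMyRank()+1, mpTag);
 }
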
Code example #2
    void process()
    {
        CWorkUnitWriteMasterBase::process();

        unsigned nslaves = container.queryJob().querySlaves(); // number of slave nodes in the job

        CMessageBuffer mb;
        unsigned s=0;
        for (; s<nslaves; s++) // pull result batches from each slave in turn (slave ranks start at 1)
        {
            loop
            {
                // an empty message prompts the slave for its next batch of results
                if (!container.queryJob().queryJobComm().send(mb, s+1, mpTag)) return;
                if (!receiveMsg(mb, s+1, mpTag)) return;
                if (0 == mb.length()) // an empty reply means this slave has no more results
                    break;
                unsigned numGot;
                mb.read(numGot);           // number of rows in this batch
                unsigned l=mb.remaining(); // size of the serialized row data that follows
                // refuse to buffer more data than the configured workunit write limit allows
                if (workunitWriteLimit && totalSize+resultData.length()+l > workunitWriteLimit)
                {
                    StringBuffer errMsg("Dataset too large to output to workunit (limit is set to ");
                    errMsg.append(workunitWriteLimit/0x100000).append(" megabytes), in result (");
                    if (resultName.length())
                        errMsg.append("name=").append(resultName);
                    else
                        errMsg.append("sequence=").append(resultSeq);
                    errMsg.append(")");
                    throw MakeThorException(TE_WorkUnitWriteLimitExceeded, "%s", errMsg.str());
                }
                resultData.append(l, mb.readDirect(l)); // buffer the slave's serialized rows
                mb.clear();
                numResults += numGot;

                // flush buffered results to the workunit once the flush threshold is reached
                if (-1 != flushThreshold && resultData.length() >= (unsigned)flushThreshold)
                    flushResults();
            }
        }
        flushResults(true); // final flush of any remaining buffered results
    }
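
The process() loop above implies a simple request/reply protocol: the master sends an empty prompt, and the slave answers each prompt either with a batch (the row count followed by the serialized rows) or with an empty buffer once it has nothing left. As a rough sketch only, a matching slave-side loop could look like the following, assuming the master is rank 0 and reusing example #1's queryJobChannel() style of communication; slaveSendResults and getNextBatch are hypothetical names.

    // Hypothetical sketch (not from the source) of the slave side of the exchange
    // driven by process() above. getNextBatch() is assumed to append serialized
    // rows to 'rows' and return how many it gathered (0 when exhausted).
    void slaveSendResults()
    {
        CMessageBuffer mb;
        loop
        {
            if (!queryJobChannel().queryJobComm().recv(mb, 0, mpTag)) // wait for the master's empty prompt
                return;
            mb.clear();
            MemoryBuffer rows;
            unsigned numGot = getNextBatch(rows);
            if (numGot)
            {
                mb.append(numGot);                            // read back as numGot on the master
                mb.append(rows.length(), rows.toByteArray()); // read back via mb.readDirect(l)
            }
            bool done = (0 == mb.length());                   // an empty reply ends this slave's exchange
            if (!queryJobChannel().queryJobComm().send(mb, 0, mpTag))
                return;
            if (done)
                break;
        }
    }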