void cancelReplicates(CActivityBase *activity, IPartDescriptor &partDesc)
{
    RemoteFilename rfn;
    IFileDescriptor &fileDesc = partDesc.queryOwner();
    unsigned copies = partDesc.numCopies();
    unsigned c=1;
    for (; c<copies; c++)
    {
        unsigned replicateCopy;
        unsigned clusterNum = partDesc.copyClusterNum(c, &replicateCopy);
        rfn.clear();
        partDesc.getFilename(c, rfn);
        StringBuffer dstName;
        rfn.getPath(dstName);
        assertex(dstName.length());

        if (replicateCopy>0)  
        {
            try
            {
                queryThor().queryBackup().cancel(dstName.str());
            }
            catch (IException *e)
            {
                Owned<IThorException> re = MakeActivityException(activity, e, "Error cancelling backup '%s'", dstName.str());
                ActPrintLog(&activity->queryContainer(), e, NULL);
                e->Release();
            }
        }
    }
}
//cloned from hthor - a candidate for commoning up.
static IKeyIndex *openKeyFile(IDistributedFilePart *keyFile)
{
    unsigned numCopies = keyFile->numCopies();
    assertex(numCopies);
    for (unsigned copy=0; copy < numCopies; copy++)
    {
        RemoteFilename rfn;
        try
        {
            OwnedIFile ifile = createIFile(keyFile->getFilename(rfn,copy));
            unsigned __int64 thissize = ifile->size();
            if (thissize != -1)
            {
                StringBuffer remotePath;
                rfn.getRemotePath(remotePath);
                unsigned crc = 0;
                keyFile->getCrc(crc);
                return createKeyIndex(remotePath.str(), crc, false, false);
            }
        }
        catch (IException *E)
        {
            EXCLOG(E, "While opening index file");
            E->Release();
        }
    }
    RemoteFilename rfn;
    StringBuffer url;
    keyFile->getFilename(rfn).getRemotePath(url);
    throw MakeStringException(1001, "Could not open key file at %s%s", url.str(), (numCopies > 1) ? " or any alternate location." : ".");
}
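// Hedged usage sketch (not part of the original source): openKeyFile() above returns an
// IKeyIndex for whichever physical copy of the part could be opened. A caller would
// typically wrap it in a key manager, mirroring what prepareKey() does further down.
// 'keyPart' and 'fixedRecordSize' are assumed to be supplied by the surrounding activity.
static void exampleOpenKeyFile(IDistributedFilePart *keyPart, size32_t fixedRecordSize)
{
    Owned<IKeyIndex> keyIndex = openKeyFile(keyPart);
    Owned<IKeyManager> keyManager = createLocalKeyManager(keyIndex, fixedRecordSize, NULL);
    // ... create/finish segment monitors and call lookup(), as prepareKey() shows below
}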
    virtual void init(MemoryBuffer &data, MemoryBuffer &slaveData) override
    {
        isLocal = 0 != (TIWlocal & helper->getFlags());

        mpTag = container.queryJobChannel().deserializeMPTag(data);
        mpTag2 = container.queryJobChannel().deserializeMPTag(data);
        data.read(active);
        if (active)
        {
            data.read(logicalFilename);
            partDesc.setown(deserializePartFileDescriptor(data));
        }

        data.read(singlePartKey);
        data.read(refactor);
        if (singlePartKey)
            buildTlk = false;
        else
        {
            data.read(buildTlk);
            if (firstNode())
            {
                if (buildTlk)
                    tlkDesc.setown(deserializePartFileDescriptor(data));
                else if (!isLocal) // existing tlk then..
                {
                    tlkDesc.setown(deserializePartFileDescriptor(data));
                    unsigned c;
                    data.read(c);
                    while (c--)
                    {
                        RemoteFilename rf;
                        rf.deserialize(data);
                        if (!existingTlkIFile)
                        {
                            Owned<IFile> iFile = createIFile(rf);
                            if (iFile->exists())
                                existingTlkIFile.set(iFile);
                        }
                    }
                    if (!existingTlkIFile)
                        throw MakeActivityException(this, TE_FileNotFound, "Top level key part does not exist, for key");
                }
            }
        }

        IOutputMetaData * diskSize = helper->queryDiskRecordSize();
        assertex(!(diskSize->getMetaFlags() & MDFneedserializedisk));
        if (diskSize->isVariableSize())
        {
            if (TIWmaxlength & helper->getFlags())
                maxDiskRecordSize = helper->getMaxKeySize();
            else
                maxDiskRecordSize = KEYBUILD_MAXLENGTH; //Current default behaviour, could be improved in the future
        }
        else
            maxDiskRecordSize = diskSize->getFixedSize();
        reportOverflow = false;
    }
void setCanAccessDirectly(RemoteFilename & file,bool set)
{
    if (set)
        file.setPort(0);
    else if (file.getPort()==0)                 // foreign daliservix may be passed in
        file.setPort(getDaliServixPort());

}
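// Minimal sketch (placeholder endpoint/path, assumes the usual jlib/rmtfile headers):
// the port convention behind setCanAccessDirectly() above and canAccessDirectly() later
// in these examples - port 0 marks a file that can be read via the local file system,
// anything else means "go through dafilesrv".
static void examplePortConvention()
{
    SocketEndpoint ep("192.168.1.10");
    RemoteFilename rfn;
    rfn.setPath(ep, "/var/lib/HPCCSystems/hpcc-data/thor/part._1_of_4");
    setCanAccessDirectly(rfn, true);                // clears the port
    assertex(rfn.getPort() == 0);
    setCanAccessDirectly(rfn, false);               // restores the default dafilesrv port
    assertex(rfn.getPort() == getDaliServixPort());
}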
void getDfuTempName(RemoteFilename & temp, const RemoteFilename & src)
{
    StringBuffer ext;
    src.split(NULL, NULL, NULL, &ext);
    ext.append(".tmp");

    temp.set(src);
    temp.setExtension(ext);
}
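// Small sketch (made-up dropzone path, not from the source): getDfuTempName() above
// appends ".tmp" to the extension, so ".../myfile.d00" becomes ".../myfile.d00.tmp".
static void exampleDfuTempName()
{
    RemoteFilename src, temp;
    src.setRemotePath("//10.0.0.1/var/lib/HPCCSystems/mydropzone/myfile.d00");
    getDfuTempName(temp, src);
    StringBuffer path;
    temp.getRemotePath(path);                       // ends in "myfile.d00.tmp"
}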
static IKeyIndex *openKeyPart(CActivityBase *activity, const char *logicalFilename, IPartDescriptor &partDesc)
{
    RemoteFilename rfn;
    partDesc.getFilename(0, rfn);
    StringBuffer filePath;
    rfn.getPath(filePath);
    unsigned crc=0;
    partDesc.getCrc(crc);
    Owned<IDelayedFile> lfile = queryThor().queryFileCache().lookup(*activity, partDesc);
    return createKeyIndex(filePath.str(), crc, *lfile, false, false);
}
bool processPartitionCommand(ISocket * masterSocket, MemoryBuffer & msg, MemoryBuffer & results)
{
    FileFormat srcFormat;
    FileFormat tgtFormat;
    unsigned whichInput;
    RemoteFilename fullPath;
    offset_t totalSize;
    offset_t thisOffset;
    offset_t thisSize;
    unsigned thisHeaderSize;
    unsigned numParts;
    bool compressedInput = false;
    unsigned compatflags = 0;  

    srcFormat.deserialize(msg);
    tgtFormat.deserialize(msg);
    msg.read(whichInput);
    fullPath.deserialize(msg);
    msg.read(totalSize);
    msg.read(thisOffset);
    msg.read(thisSize);
    msg.read(thisHeaderSize);
    msg.read(numParts);
    if (msg.remaining())
        msg.read(compressedInput);
    if (msg.remaining())
        msg.read(compatflags); // not yet used
    StringAttr decryptkey;
    if (msg.remaining())
        msg.read(decryptkey);
    if (msg.remaining())
    {
        srcFormat.deserializeExtra(msg, 1);
        tgtFormat.deserializeExtra(msg, 1);
    }

    StringBuffer text;
    fullPath.getRemotePath(text);
    LOG(MCdebugProgress, unknownJob, "Process partition %d(%s)", whichInput, text.str());
    Owned<IFormatProcessor> processor = createFormatProcessor(srcFormat, tgtFormat, true);
    Owned<IOutputProcessor> target = createOutputProcessor(tgtFormat);
    processor->setTarget(target);

    processor->setPartitionRange(totalSize, thisOffset, thisSize, thisHeaderSize, numParts);
    processor->setSource(whichInput, fullPath, compressedInput, decryptkey);
    processor->calcPartitions(NULL);

    PartitionPointArray partition;
    processor->getResults(partition);

    serialize(partition, results);
    return true;
}
void CDiskReadSlaveActivityBase::kill()
{
    if (!abortSoon && 0 != (helper->getFlags() & TDXtemporary) && !container.queryJob().queryUseCheckpoints())
    {
        if (1 == partDescs.ordinality() && !partDescs.item(0).queryOwner().queryProperties().getPropBool("@pausefile"))
        {
            IPartDescriptor &partDesc = partDescs.item(0);
            RemoteFilename rfn;
            partDesc.getFilename(0, rfn);
            StringBuffer locationName;
            rfn.getLocalPath(locationName);
            container.queryTempHandler()->deregisterFile(locationName.str());
        }
    }
    CSlaveActivity::kill();
}
static bool getFileInfo(RemoteFilename &fn, Owned<IFile> &f, offset_t &size,CDateTime &modtime)
{
    f.setown(createIFile(fn));
    bool isdir = false;
    bool ret = f->getInfo(isdir,size,modtime);
    if (ret&&isdir) {
        StringBuffer fs;
        fn.getRemotePath(fs);
        throw MakeStringException(-1,"%s is a directory",fs.str());
    }
    return ret;
}
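// Hedged usage sketch: probing a remote physical file with getFileInfo() above.
// The endpoint and path are placeholders.
static void exampleGetFileInfo()
{
    SocketEndpoint ep("10.0.0.2");
    RemoteFilename rfn;
    rfn.setPath(ep, "/var/lib/HPCCSystems/hpcc-data/thor/somefile._1_of_1");
    Owned<IFile> file;
    offset_t size = 0;
    CDateTime modtime;
    if (getFileInfo(rfn, file, size, modtime))
        PROGLOG("size = %" I64F "u", size);
    else
        PROGLOG("file does not exist");
}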
void renameDfuTempToFinal(const RemoteFilename & realname)
{
    RemoteFilename tempFilename;
    StringBuffer newTailname;
    getDfuTempName(tempFilename, realname);
    realname.getTail(newTailname);

    OwnedIFile output = createIFile(tempFilename);
    try
    {
        output->rename(newTailname);
    }
    catch (IException * e)
    {
        EXCLOG(e, "Failed to rename target file");
        StringBuffer oldName;
        realname.getPath(oldName);
        LOG(MCdebugInfoDetail, unknownJob, "Error: Rename %s->%s failed - trying to delete target and rename again", oldName.str(), newTailname.str());
        e->Release();
        OwnedIFile old = createIFile(realname);
        old->remove();
        output->rename(newTailname);
    }
}
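// Illustrative end-to-end flow (assumed caller, not from the source): write the data to
// the ".tmp" name produced by getDfuTempName(), then promote it with renameDfuTempToFinal()
// above once the write completes.
static void exampleWriteViaTemp(const RemoteFilename &target)
{
    RemoteFilename temp;
    getDfuTempName(temp, target);
    OwnedIFile out = createIFile(temp);
    OwnedIFileIO io = out->open(IFOcreate);
    // ... write the part data through 'io' ...
    io.clear();
    renameDfuTempToFinal(target);                   // renames "<name>.tmp" to "<name>"
}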
void CWriteMasterBase::publish()
{
    if (published) return;
    published = true;
    if (!(diskHelperBase->getFlags() & (TDXtemporary|TDXjobtemp)))
        updateActivityResult(container.queryJob().queryWorkUnit(), diskHelperBase->getFlags(), diskHelperBase->getSequence(), fileName, recordsProcessed);

    IPropertyTree &props = fileDesc->queryProperties();
    props.setPropInt64("@recordCount", recordsProcessed);
    if (0 == (diskHelperBase->getFlags() & TDXtemporary) || container.queryJob().queryUseCheckpoints())
    {
        if (0 != (diskHelperBase->getFlags() & TDWexpires))
            setExpiryTime(props, diskHelperBase->getExpiryDays());
        if (TDWupdate & diskHelperBase->getFlags())
        {
            unsigned eclCRC;
            unsigned __int64 totalCRC;
            diskHelperBase->getUpdateCRCs(eclCRC, totalCRC);
            props.setPropInt("@eclCRC", eclCRC);
            props.setPropInt64("@totalCRC", totalCRC);
        }
    }
    container.queryTempHandler()->registerFile(fileName, container.queryOwner().queryGraphId(), diskHelperBase->getTempUsageCount(), TDXtemporary & diskHelperBase->getFlags(), getDiskOutputKind(diskHelperBase->getFlags()), &clusters);
    if (!dlfn.isExternal())
    {
        bool temporary = 0 != (diskHelperBase->getFlags()&TDXtemporary);
        if (!temporary && (queryJob().querySlaves() < fileDesc->numParts()))
        {
            // create empty parts for a fileDesc being published that is wider than this cluster
            size32_t recordSize = 0;
            IOutputMetaData *diskRowMeta = diskHelperBase->queryDiskRecordSize()->querySerializedDiskMeta();
            if (diskRowMeta->isFixedSize() && (TAKdiskwrite == container.getKind()))
            {
                recordSize = diskRowMeta->getMinRecordSize();
                if (0 != (diskHelperBase->getFlags() & TDXgrouped))
                    recordSize += 1;
            }
            unsigned compMethod = COMPRESS_METHOD_LZW;
            // rowdiff used if recordSize > 0, else fallback to compMethod
            if (getOptBool(THOROPT_COMP_FORCELZW, false))
            {
                recordSize = 0; // by default if fixed length (recordSize set), row diff compression is used. This forces compMethod.
                compMethod = COMPRESS_METHOD_LZW;
            }
            else if (getOptBool(THOROPT_COMP_FORCEFLZ, false))
                compMethod = COMPRESS_METHOD_FASTLZ;
            else if (getOptBool(THOROPT_COMP_FORCELZ4, false))
                compMethod = COMPRESS_METHOD_LZ4;
            bool blockCompressed;
            bool compressed = fileDesc->isCompressed(&blockCompressed);
            for (unsigned clusterIdx=0; clusterIdx<fileDesc->numClusters(); clusterIdx++)
            {
                StringBuffer clusterName;
                fileDesc->getClusterGroupName(clusterIdx, clusterName, &queryNamedGroupStore());
                PROGLOG("Creating blank parts for file '%s', cluster '%s'", fileName.get(), clusterName.str());
                unsigned p=0;
                while (p<fileDesc->numParts())
                {
                    if (p == targetOffset)
                        p += queryJob().querySlaves();
                    IPartDescriptor *partDesc = fileDesc->queryPart(p);
                    CDateTime createTime, modifiedTime;
                    for (unsigned c=0; c<partDesc->numCopies(); c++)
                    {
                        RemoteFilename rfn;
                        partDesc->getFilename(c, rfn);
                        StringBuffer path;
                        rfn.getPath(path);
                        try
                        {
                            ensureDirectoryForFile(path.str());
                            OwnedIFile iFile = createIFile(path.str());
                            Owned<IFileIO> iFileIO;
                            if (compressed)
                                iFileIO.setown(createCompressedFileWriter(iFile, recordSize, false, true, NULL, compMethod));
                            else
                                iFileIO.setown(iFile->open(IFOcreate));
                            dbgassertex(iFileIO.get());
                            iFileIO.clear();
                            // ensure copies have matching datestamps, as they would do normally (backupnode expects it)
                            if (partDesc->numCopies() > 1)
                            {
                                if (0 == c)
                                    iFile->getTime(&createTime, &modifiedTime, NULL);
                                else
                                    iFile->setTime(&createTime, &modifiedTime, NULL);
                            }
                        }
                        catch (IException *e)
                        {
                            if (0 == c)
                                throw;
                            Owned<IThorException> e2 = MakeThorException(e);
                            e->Release();
                            e2->setAction(tea_warning);
                            queryJob().fireException(e2);
                        }
                    }
                    partDesc->queryProperties().setPropInt64("@size", 0);
                    p++;
                }
            }
        }
        queryThorFileManager().publish(container.queryJob(), fileName, *fileDesc, NULL);
    }
}
static void test2() 
{
    const size32_t recsize = 17;
    printf("Test DFS\n");
    StringBuffer s;
    unsigned i;
    unsigned n;
    unsigned t;
    queryNamedGroupStore().remove("daregress_group");
    queryDistributedFileDirectory().removeEntry("daregress::superfile1");
    SocketEndpointArray epa;
    for (n=0;n<400;n++) {
        s.clear().append("192.168.").append(n/256).append('.').append(n%256);
        SocketEndpoint ep(s.str());
        epa.append(ep);
    }
    Owned<IGroup> group = createIGroup(epa); 
    queryNamedGroupStore().add("daregress_group",group,true);
    if (!queryNamedGroupStore().find(group,s.clear()))
        ERROR("Created logical group not found");
    if (stricmp(s.str(),"daregress_group")!=0)
        ERROR("Created logical group found with wrong name");
    group.setown(queryNamedGroupStore().lookup("daregress_group"));
    if (!group)
        ERROR("named group lookup failed");
    printf("Named group created    - 400 nodes\n");
    for (i=0;i<100;i++) {
        Owned<IPropertyTree> pp = createPTree("Part");
        Owned<IFileDescriptor>fdesc = createFileDescriptor();
        fdesc->setDefaultDir("c:\\thordata\\regress");
        n = 9;
        for (unsigned k=0;k<400;k++) {
            s.clear().append("192.168.").append(n/256).append('.').append(n%256);
            Owned<INode> node = createINode(s.str());
            pp->setPropInt64("@size",(n*777+i)*recsize);
            s.clear().append("daregress_test").append(i).append("._").append(n+1).append("_of_400");
            fdesc->setPart(n,node,s.str(),pp);
            n = (n+9)%400;
        }
        fdesc->queryProperties().setPropInt("@recordSize",17);
        s.clear().append("daregress::test").append(i);
        queryDistributedFileDirectory().removeEntry(s.str());
        StringBuffer cname;
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().createNew(fdesc);
        if (stricmp(dfile->getClusterName(0,cname),"daregress_group")!=0)
            ERROR1("Cluster name wrong %d",i);
        s.clear().append("daregress::test").append(i);
        dfile->attach(s.str());
    }
    printf("DFile create done      - 100 files\n");
    unsigned samples = 5;
    t = 33;
    for (i=0;i<100;i++) {
        s.clear().append("daregress::test").append(t);
        if (!queryDistributedFileDirectory().exists(s.str())) 
            ERROR1("Could not find %s",s.str());
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
        if (!dfile) {
            ERROR1("Could not find %s",s.str());
            continue;
        }
        offset_t totsz = 0;
        n = 11;
        for (unsigned k=0;k<400;k++) {
            Owned<IDistributedFilePart> part = dfile->getPart(n);
            if (!part) {
                ERROR2("part not found %d %d",t,n);
                continue;
            }
            s.clear().append("192.168.").append(n/256).append('.').append(n%256);
            Owned<INode> node = createINode(s.str());
            if (!node->equals(part->queryNode()))
                ERROR2("part node mismatch %d, %d",t,n);
            if (part->getFileSize(false,false)!=(n*777+t)*recsize)
                ERROR4("size node mismatch %d, %d, %d, %d",t,n,(unsigned)part->getFileSize(false,false),(n*777+t)*recsize);
            s.clear().append("daregress_test").append(t).append("._").append(n+1).append("_of_400");
/* ** TBD
            if (stricmp(s.str(),part->queryPartName())!=0)
                ERROR4("part name mismatch %d, %d '%s' '%s'",t,n,s.str(),part->queryPartName());
*/
            totsz += (n*777+t)*recsize;
            if ((samples>0)&&(i+n+t==k)) {
                samples--;
                RemoteFilename rfn;
                part->getFilename(rfn,samples%2);
                StringBuffer fn;
                rfn.getRemotePath(fn);
                printf("SAMPLE: %d,%d %s\n",t,n,fn.str());
            }
            n = (n+11)%400;
        }
        if (totsz!=dfile->getFileSize(false,false))
            ERROR1("total size mismatch %d",t);
        t = (t+33)%100; 
    }
    printf("DFile lookup done      - 100 files\n");

    // check iteration
    __int64 crctot = 0;
    unsigned np = 0;
    unsigned totrows = 0;
    Owned<IDistributedFileIterator> fiter = queryDistributedFileDirectory().getIterator("daregress::*",false); 
    Owned<IDistributedFilePartIterator> piter;
    ForEach(*fiter) {
        piter.setown(fiter->query().getIterator()); 
        ForEach(*piter) {
            RemoteFilename rfn;
            StringBuffer s;
            piter->query().getFilename(rfn,0);
            rfn.getRemotePath(s);
            piter->query().getFilename(rfn,1);
            rfn.getRemotePath(s);
            crctot += crc32(s.str(),s.length(),0);
            np++;
            totrows += (unsigned)(piter->query().getFileSize(false,false)/fiter->query().queryProperties().getPropInt("@recordSize",-1));
        }
    }
    piter.clear();
    fiter.clear();
    printf("DFile iterate done     - %d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot);
    Owned<IDistributedSuperFile> sfile;
    sfile.setown(queryDistributedFileDirectory().createSuperFile("daregress::superfile1",true));
    for (i = 0;i<100;i++) {
        s.clear().append("daregress::test").append(i);
        sfile->addSubFile(s.str());
    }
    sfile.clear();
    sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1"));
    if (!sfile) {
        ERROR("Could not find added superfile");
        return;
    }
    __int64 savcrc = crctot;
    crctot = 0;
    np = 0;
    totrows = 0;
    size32_t srs = (size32_t)sfile->queryProperties().getPropInt("@recordSize",-1);
    if (srs!=17)
        ERROR1("Superfile does not match subfile row size %d",srs);
    piter.setown(sfile->getIterator()); 
    ForEach(*piter) {
        RemoteFilename rfn;
        StringBuffer s;
        piter->query().getFilename(rfn,0);
        rfn.getRemotePath(s);
        piter->query().getFilename(rfn,1);
        rfn.getRemotePath(s);
        crctot += crc32(s.str(),s.length(),0);
        np++;
        totrows += (unsigned)(piter->query().getFileSize(false,false)/srs);
    }
    piter.clear();
    printf("Superfile iterate done - %d parts, %d rows, CRC sum %"I64F"d\n",np,totrows,crctot);
    if (crctot!=savcrc)
        ERROR("SuperFile does not match sub files");
    unsigned tr = (unsigned)(sfile->getFileSize(false,false)/srs); 
    if (totrows!=tr)
        ERROR1("Superfile size does not match part sum %d",tr);
    sfile->detach();
    sfile.clear();
    sfile.setown(queryDistributedFileDirectory().lookupSuperFile("daregress::superfile1"));
    if (sfile)
        ERROR("Superfile deletion failed");
    t = 37;
    for (i=0;i<100;i++) {
        s.clear().append("daregress::test").append(t);
        if (i%2) { // alternate between detach and removeEntry
            Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
            if (!dfile)
                ERROR1("Could not find %s",s.str());
            dfile->detach();
        }
        else 
            queryDistributedFileDirectory().removeEntry(s.str());
        t = (t+37)%100; 
    }
    printf("DFile removal complete\n");
    t = 39;
    for (i=0;i<100;i++) {
        s.clear().append("daregress::test").append(t);
        if (queryDistributedFileDirectory().exists(s.str()))
            ERROR1("Found %s after deletion",s.str());
        Owned<IDistributedFile> dfile = queryDistributedFileDirectory().lookup(s.str());
        if (dfile)
            ERROR1("Found %s after deletion",s.str());
        t = (t+39)%100; 
    }
    printf("DFile removal check complete\n");
    queryNamedGroupStore().remove("daregress_group");
    if (queryNamedGroupStore().lookup("daregress_group"))
        ERROR("Named group not removed");
}
void CFileSpraySoapBindingEx::downloadFile(IEspContext &context, CHttpRequest* request, CHttpResponse* response)
{
    try
    {
        StringBuffer netAddressStr, osStr, pathStr, nameStr;
        request->getParameter("NetAddress", netAddressStr);
        request->getParameter("OS", osStr);
        request->getParameter("Path", pathStr);
        request->getParameter("Name", nameStr);
        
#if 0
        StringArray files;
        IProperties* params = request->queryParameters();
        Owned<IPropertyIterator> iter = params->getIterator();
        if (iter && iter->first())
        {
            while (iter->isValid())
            {
                const char *keyname=iter->getPropKey();
                if (!keyname || strncmp(keyname, "Names", 5))
                    continue;

                files.append(params->queryProp(iter->getPropKey()));
                iter->next();
            }
        }
#endif

        if (netAddressStr.length() < 1)
            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Network address not specified.");

        if (pathStr.length() < 1)
            throw MakeStringException(ECLWATCH_INVALID_INPUT, "Path not specified.");

        if (nameStr.length() < 1)
            throw MakeStringException(ECLWATCH_INVALID_INPUT,"File name not specified.");

        char pathSep = '/';
        if ((osStr.length() > 1) && (atoi(osStr.str())== OS_WINDOWS))
        {
            pathSep = '\\';
        }

        pathStr.replace(pathSep=='\\'?'/':'\\', pathSep);
        if (*(pathStr.str() + pathStr.length() -1) != pathSep)
            pathStr.append( pathSep );

        StringBuffer fullName;
        fullName.appendf("%s%s", pathStr.str(), nameStr.str());

        StringBuffer headerStr("attachment;");
        headerStr.appendf("filename=%s", nameStr.str());

        RemoteFilename rfn;
        rfn.setRemotePath(fullName.str());
        SocketEndpoint ep(netAddressStr.str());
        rfn.setIp(ep);

        Owned<IFile> rFile = createIFile(rfn);
        if (!rFile)
            throw MakeStringException(ECLWATCH_CANNOT_OPEN_FILE,"Cannot open file %s.",fullName.str());

        OwnedIFileIO rIO = rFile->openShared(IFOread,IFSHfull);
        if (!rIO)
            throw MakeStringException(ECLWATCH_CANNOT_READ_FILE,"Cannot read file %s.",fullName.str());

        IFileIOStream* ioS = createIOStream(rIO);
        context.addCustomerHeader("Content-disposition", headerStr.str());
        response->setContent(ioS);  
        response->setContentType(HTTP_TYPE_OCTET_STREAM);
        response->send();
    }
    catch(IException* e)
    {   
        FORWARDEXCEPTION(context, e,  ECLWATCH_INTERNAL_ERROR);
    }
    return;
}
    void prepareKey(IDistributedFile *index)
    {
        IDistributedFile *f = index;
        IDistributedSuperFile *super = f->querySuperFile();

        unsigned nparts = f->numParts(); // includes tlks if any, but unused in array
        performPartLookup.ensure(nparts);

        bool checkTLKConsistency = (nullptr != super) && !localKey && (0 != (TIRsorted & indexBaseHelper->getFlags()));
        if (nofilter)
        {
            while (nparts--) performPartLookup.append(true);
            if (!checkTLKConsistency) return;
        }
        else
        {
            while (nparts--) performPartLookup.append(false); // parts to perform lookup set later
        }

        Owned<IDistributedFileIterator> iter;
        if (super)
        {
            iter.setown(super->getSubFileIterator(true));
            verifyex(iter->first());
            f = &iter->query();
        }
        unsigned width = f->numParts();
        if (!localKey)
            --width;
        assertex(width);
        unsigned tlkCrc = 0;
        bool first = true;
        unsigned superSubIndex=0;
        bool fileCrc = false, rowCrc = false;
        for (;;)
        {
            Owned<IDistributedFilePart> part = f->getPart(width);
            if (checkTLKConsistency)
            {
                unsigned _tlkCrc;
                if (part->getCrc(_tlkCrc))
                    fileCrc = true;
                else if (part->queryAttributes().hasProp("@crc")) // NB: key "@crc" is not a crc on the file, but data within.
                {
                    _tlkCrc = part->queryAttributes().getPropInt("@crc");
                    rowCrc = true;
                }
                else if (part->queryAttributes().hasProp("@tlkCrc")) // backward compat.
                {
                    _tlkCrc = part->queryAttributes().getPropInt("@tlkCrc");
                    rowCrc = true;
                }
                else
                {
                    if (rowCrc || fileCrc)
                    {
                        checkTLKConsistency = false;
                        Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, some crc attributes are missing", super->queryLogicalName());
                        queryJobChannel().fireException(e);
                    }
                }
                if (rowCrc && fileCrc)
                {
                    checkTLKConsistency = false;
                    Owned<IException> e = MakeActivityWarning(&container, 0, "Cannot validate that tlks in superfile %s match, due to mixed crc types.", super->queryLogicalName());
                    queryJobChannel().fireException(e);
                }
                if (checkTLKConsistency)
                {
                    if (first)
                    {
                        tlkCrc = _tlkCrc;
                        first = false;
                    }
                    else if (tlkCrc != _tlkCrc)
                        throw MakeActivityException(this, 0, "Sorted output on super files comprising of non coparitioned sub keys is not supported (TLK's do not match)");
                }
            }
            if (!nofilter)
            {
                Owned<IKeyIndex> keyIndex;
                unsigned copy;
                for (copy=0; copy<part->numCopies(); copy++)
                {
                    RemoteFilename rfn;
                    OwnedIFile ifile = createIFile(part->getFilename(rfn,copy));
                    if (ifile->exists())
                    {
                        StringBuffer remotePath;
                        rfn.getRemotePath(remotePath);
                        unsigned crc = 0;
                        part->getCrc(crc);
                        keyIndex.setown(createKeyIndex(remotePath.str(), crc, false, false));
                        break;
                    }
                }
                if (!keyIndex)
                    throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", index->queryLogicalName());

                unsigned fixedSize = indexBaseHelper->queryDiskRecordSize()->querySerializedDiskMeta()->getFixedSize(); // used only if fixed
                Owned <IKeyManager> tlk = createLocalKeyManager(keyIndex, fixedSize, NULL);
                indexBaseHelper->createSegmentMonitors(tlk);
                tlk->finishSegmentMonitors();
                tlk->reset();
                while (tlk->lookup(false))
                {
                    if (tlk->queryFpos())
                        performPartLookup.replace(true, (aindex_t)(super?super->numSubFiles(true)*(tlk->queryFpos()-1)+superSubIndex:tlk->queryFpos()-1));
                }
            }
            if (!super||!iter->next())
                break;
            superSubIndex++;
            f = &iter->query();
            if (width != f->numParts()-1)
                throw MakeActivityException(this, 0, "Super key %s, with mixture of sub key width are not supported.", f->queryLogicalName());
        }
    }
static bool physicalPartCopy(IFile *from,const char *tofile, Owned<IException> &exc, StringBuffer *tmpname)
{
    StringBuffer tmpnamestr;
    if (!tmpname)
        tmpname = &tmpnamestr;
    tmpname->append(tofile).append("__");
    size32_t l = tmpname->length();
    genUUID(*tmpname,true); // true for windows
    StringAttr uuid(tmpname->str()+l);
    tmpname->append(".tmp");
    RemoteFilename tmpfn;
    tmpfn.setRemotePath(tmpname->str());
    //unsigned lastpc;
#ifdef LOG_PART_COPY
    PROGLOG("start physicalPartCopy(%s,%s)",from->queryFilename(),tmpname->str());
#endif
    try {
        recursiveCreateDirectoryForFile(tmpname->str());
        while(!asyncCopyFileSection(
                uuid,
                from,
                tmpfn,
                (offset_t)-1, // creates file
                0,
                (offset_t)-1, // all file
                NULL,
                PHYSICAL_COPY_POLL_TIME)) {
            // Abort check TBD
        }
    }
    catch (IException *e) {
        EXCLOG(e,"SingleFileCopy: File copy error");
        if (exc)
            exc.setown(e);
        else
            e->Release();
    }
    Owned<IFile> f = createIFile(tmpfn);
    if (!exc.get()&&(tmpnamestr.length()!=0)) {
        try {
#ifdef LOG_PART_COPY
            PROGLOG("physicalPartCopy rename(%s,%s)",tmpname->str(),pathTail(tofile));
#endif
            f->rename(pathTail(tofile));
        }
        catch (IException *e) {
            EXCLOG(e,"SingleFileCopy: File rename error");
            if (exc)
                exc.setown(e);
            else
                e->Release();
        }
    }
    if (exc.get()) {
        try {
            f->remove();
        }
        catch (IException *e) {
            // ignore
            e->Release();
        }
    }
#ifdef LOG_PART_COPY
    PROGLOG("done physicalPartCopy %s",(exc.get()==NULL)?"OK":"Failed");
#endif
    return exc.get()==NULL;
}
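// Assumed call site (placeholder paths): copy a local part to a remote destination.
// physicalPartCopy() above stages the data in a "<target>__<uuid>.tmp" file and renames
// it into place on success.
static void examplePhysicalPartCopy()
{
    Owned<IFile> src = createIFile("/var/lib/HPCCSystems/hpcc-data/thor/part._1_of_4");
    Owned<IException> exc;
    if (!physicalPartCopy(src, "//10.0.0.3/var/lib/HPCCSystems/hpcc-mirror/thor/part._1_of_4", exc, NULL))
    {
        if (exc.get())
            EXCLOG(exc, "physicalPartCopy failed");
    }
}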
    virtual void init()
    {
        CMasterActivity::init();
        OwnedRoxieString indexFileName(helper->getIndexFileName());
        Owned<IDistributedFile> dataFile;
        Owned<IDistributedFile> indexFile = queryThorFileManager().lookup(container.queryJob(), indexFileName, false, 0 != (helper->getJoinFlags() & JFindexoptional), true);

        unsigned keyReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJKRR", 0);
        if (!keyReadWidth || keyReadWidth>container.queryJob().querySlaves())
            keyReadWidth = container.queryJob().querySlaves();
        

        initMb.clear();
        initMb.append(indexFileName.get());
        if (helper->diskAccessRequired())
            numTags += 2;
        initMb.append(numTags);
        unsigned t=0;
        for (; t<numTags; t++)
        {
            tags[t] = container.queryJob().allocateMPTag();
            initMb.append(tags[t]);
        }
        bool keyHasTlk = false;
        if (indexFile)
        {
            unsigned numParts = 0;
            localKey = indexFile->queryAttributes().getPropBool("@local");

            if (container.queryLocalData() && !localKey)
                throw MakeActivityException(this, 0, "Keyed Join cannot be LOCAL unless supplied index is local");

            checkFormatCrc(this, indexFile, helper->getIndexFormatCrc(), true);
            Owned<IFileDescriptor> indexFileDesc = indexFile->getFileDescriptor();
            IDistributedSuperFile *superIndex = indexFile->querySuperFile();
            unsigned superIndexWidth = 0;
            unsigned numSuperIndexSubs = 0;
            if (superIndex)
            {
                numSuperIndexSubs = superIndex->numSubFiles(true);
                bool first=true;
                // consistency check
                Owned<IDistributedFileIterator> iter = superIndex->getSubFileIterator(true);
                ForEach(*iter)
                {
                    IDistributedFile &f = iter->query();
                    unsigned np = f.numParts()-1;
                    IDistributedFilePart &part = f.queryPart(np);
                    const char *kind = part.queryAttributes().queryProp("@kind");
                    bool hasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind); // if last part not tlk, then deemed local (might be singlePartKey)
                    if (first)
                    {
                        first = false;
                        keyHasTlk = hasTlk;
                        superIndexWidth = f.numParts();
                        if (keyHasTlk)
                            --superIndexWidth;
                    }
                    else
                    {
                        if (hasTlk != keyHasTlk)
                            throw MakeActivityException(this, 0, "Local/Single part keys cannot be mixed with distributed(tlk) keys in keyedjoin");
                        if (keyHasTlk && superIndexWidth != f.numParts()-1)
                            throw MakeActivityException(this, 0, "Super sub keys of different width cannot be mixed with distributed(tlk) keys in keyedjoin");
                    }
                }
                if (keyHasTlk)
                    numParts = superIndexWidth * numSuperIndexSubs;
                else
                    numParts = superIndex->numParts();
            }
            else
            {
                numParts = indexFile->numParts();
                if (numParts)
                {
                    const char *kind = indexFile->queryPart(indexFile->numParts()-1).queryAttributes().queryProp("@kind");
                    keyHasTlk = NULL != kind && 0 == stricmp("topLevelKey", kind);
                    if (keyHasTlk)
                        --numParts;
                }
            }
            if (numParts)
            {
                initMb.append(numParts);
                initMb.append(superIndexWidth); // 0 if not superIndex
                initMb.append((superIndex && superIndex->isInterleaved()) ? numSuperIndexSubs : 0);
                unsigned p=0;
                UnsignedArray parts;
                for (; p<numParts; p++)
                    parts.append(p);
                indexFileDesc->serializeParts(initMb, parts);
                if (localKey)
                    keyHasTlk = false; // not used
                initMb.append(keyHasTlk);
                if (keyHasTlk)
                {
                    if (numSuperIndexSubs)
                        initMb.append(numSuperIndexSubs);
                    else
                        initMb.append((unsigned)1);

                    Owned<IDistributedFileIterator> iter;
                    IDistributedFile *f;
                    if (superIndex)
                    {
                        iter.setown(superIndex->getSubFileIterator(true));
                        f = &iter->query();
                    }
                    else
                        f = indexFile;
                    loop
                    {
                        unsigned location;
                        OwnedIFile iFile;
                        StringBuffer filePath;
                        Owned<IFileDescriptor> fileDesc = f->getFileDescriptor();
                        Owned<IPartDescriptor> tlkDesc = fileDesc->getPart(fileDesc->numParts()-1);
                        if (!getBestFilePart(this, *tlkDesc, iFile, location, filePath))
                            throw MakeThorException(TE_FileNotFound, "Top level key part does not exist, for key: %s", f->queryLogicalName());
                        OwnedIFileIO iFileIO = iFile->open(IFOread);
                        assertex(iFileIO);

                        size32_t tlkSz = (size32_t)iFileIO->size();
                        initMb.append(tlkSz);
                        ::read(iFileIO, 0, tlkSz, initMb);

                        if (!iter || !iter->next())
                            break;
                        f = &iter->query();
                    }
                }
                if (helper->diskAccessRequired())
                {
                    OwnedRoxieString fetchFilename(helper->getFileName());
                    if (fetchFilename)
                    {
                        dataFile.setown(queryThorFileManager().lookup(container.queryJob(), fetchFilename, false, 0 != (helper->getFetchFlags() & FFdatafileoptional), true));
                        if (dataFile)
                        {
                            if (superIndex)
                                throw MakeActivityException(this, 0, "Superkeys and full keyed joins are not supported");
                            dataFileDesc.setown(getConfiguredFileDescriptor(*dataFile));
                            void *ekey;
                            size32_t ekeylen;
                            helper->getFileEncryptKey(ekeylen,ekey);
                            bool encrypted = dataFileDesc->queryProperties().getPropBool("@encrypted");
                            if (0 != ekeylen)
                            {
                                memset(ekey,0,ekeylen);
                                free(ekey);
                                if (!encrypted)
                                {
                                    Owned<IException> e = MakeActivityWarning(&container, TE_EncryptionMismatch, "Ignoring encryption key provided as file '%s' was not published as encrypted", dataFile->queryLogicalName());
                                    container.queryJob().fireException(e);
                                }
                            }
                            else if (encrypted)
                                throw MakeActivityException(this, 0, "File '%s' was published as encrypted but no encryption key provided", dataFile->queryLogicalName());

                            /* If the fetch file is local to the cluster, fetches are sent to the local node for processing; each node
                             * has info about its local parts only.
                             * If the fetch file is off-cluster, fetches are performed by the requesting node directly on the fetch part,
                             * therefore each node needs all part descriptors.
                             */
                            remoteDataFiles = false;
                            RemoteFilename rfn;
                            dataFileDesc->queryPart(0)->getFilename(0, rfn);
                            if (!rfn.queryIP().ipequals(container.queryJob().querySlaveGroup().queryNode(0).endpoint()))
                                remoteDataFiles = true;
                            if (!remoteDataFiles) // local to cluster
                            {
                                unsigned dataReadWidth = (unsigned)container.queryJob().getWorkUnitValueInt("KJDRR", 0);
                                if (!dataReadWidth || dataReadWidth>container.queryJob().querySlaves())
                                    dataReadWidth = container.queryJob().querySlaves();
                                Owned<IGroup> grp = container.queryJob().querySlaveGroup().subset((unsigned)0, dataReadWidth);
                                dataFileMapping.setown(getFileSlaveMaps(dataFile->queryLogicalName(), *dataFileDesc, container.queryJob().queryUserDescriptor(), *grp, false, false, NULL));
                                dataFileMapping->serializeFileOffsetMap(offsetMapMb.clear());
                            }
                        }
                        else
                            indexFile.clear();
                    }
                }
            }
            else
bool canAccessDirectly(const RemoteFilename & file) // not that well named but historical
{
    return (file.getPort()==0);
}
    void verifyFile(const char *name,CDateTime *cutoff)
    {
        Owned<IDistributedFile> file=queryDistributedFileDirectory().lookup(name,UNKNOWN_USER);
        if (!file)
            return;
        IPropertyTree &fileprops = file->queryAttributes();
        bool blocked = false;
        if (file->isCompressed(&blocked)&&!blocked)
            return;
        if (stopped)
            return;
        StringBuffer dtstr;
        if (fileprops.getProp("@verified",dtstr)) {
            if (!cutoff)
                return;
            CDateTime dt;
            dt.setString(dtstr.str());
            if (dt.compare(*cutoff)<=0)
                return;
        }
        list.kill();
        unsigned width = file->numParts();
        unsigned short port = getDaliServixPort();
        try {
            Owned<IDistributedFilePart> testpart = file->getPart(0);
            SocketEndpoint ep(testpart->queryNode()->endpoint()); 
            if (!dafilesrvips.verifyDaliFileServer(ep)) {
                StringBuffer ips;
                ep.getIpText(ips);
                PROGLOG("VERIFY: file %s, cannot run DAFILESRV on %s",name,ips.str());
                return;
            }
        }
        catch (IException *e)
        {
            StringBuffer s;
            s.appendf("VERIFY: file %s",name);
            EXCLOG(e, s.str());
            e->Release();
            return;
        }
        for (unsigned idx=0;idx<width;idx++) {
            Owned<IDistributedFilePart> part = file->getPart(idx);
            for (unsigned copy = 0; copy < part->numCopies(); copy++) {
                if (stopped)
                    return;
                unsigned reqcrc;
                if (!part->getCrc(reqcrc))
                    continue;
                SocketEndpoint ep(part->queryNode()->endpoint());
                if (!dafilesrvips.verifyDaliFileServer(ep)) {
                    StringBuffer ips;
                    ep.getIpText(ips);
                    PROGLOG("VERIFY: file %s, cannot run DAFILESRV on %s",name,ips.str());
                    continue;
                }
                RemoteFilename rfn;
                part->getFilename(rfn,copy);
                rfn.setPort(port); 
                add(rfn,idx,copy,reqcrc);
            }
        }
        if (list.ordinality()==0)
            return;
        PROGLOG("VERIFY: file %s started",name);
        file.clear();
        CriticalSection crit;
        class casyncfor: public CAsyncFor
        {
            CFileCrcList *parent;
            CriticalSection &crit;
        public:
            bool ok;
            casyncfor(CFileCrcList *_parent, CriticalSection &_crit)
                : crit(_crit)
            {
                parent = _parent;
                ok = true;
            }
            void Do(unsigned i)
            {
                CriticalBlock block(crit);
                if (parent->stopped)
                    return;
                CFileCrcItem &item = parent->list.item(i);
                RemoteFilename &rfn = item.filename;
                Owned<IFile> partfile;
                StringBuffer eps;
                try
                {
                    partfile.setown(createIFile(rfn));
                    // PROGLOG("VERIFY: part %s on %s",partfile->queryFilename(),rfn.queryEndpoint().getUrlStr(eps).str());
                    if (partfile) {
                        if (parent->stopped)
                            return;
                        CriticalUnblock unblock(crit);
                        item.crc = partfile->getCRC();
                    }
                    else
                        ok = false;

                }
                catch (IException *e)
                {
                    StringBuffer s;
                    s.appendf("VERIFY: part %s on %s",partfile->queryFilename(),rfn.queryEndpoint().getUrlStr(eps).str());
                    EXCLOG(e, s.str());
                    e->Release();
                    ok = false;
                }
            }
        } afor(this,crit);
        afor.For(list.ordinality(),50,false,true);
        ForEachItemIn(j,list) {
            CFileCrcItem &item = list.item(j);
            if (item.crc!=item.requiredcrc) {
                if (stopped)
                    return;
                StringBuffer rfs;
                PROGLOG("VERIFY: FAILED %s (%x,%x) file %s",name,item.crc,item.requiredcrc,item.filename.getRemotePath(rfs).str());
                afor.ok = false;
            }
        }
    }
int main(int argc, char* argv[])
{

    InitModuleObjects();
    EnableSEHtoExceptionMapping();
    if (argc<2) {
        usage(argv[0]);
        return -1;
    }
    bool forceGroupUpdate = false;
    StringBuffer filename;
    StringBuffer inst;
    StringBuffer dcat;
    StringBuffer dcomp;
    StringBuffer dinst;
    StringBuffer dip;
    for (int i=1;i<argc;i++) {
        if (argv[i][0]=='-') {
            if ((stricmp(argv[i],"-i")==0)&&(i+1<argc)) {
                inst.append(argv[++i]);
            }
            else if (0==stricmp(argv[i],"-f")) {
                forceGroupUpdate = true;
            }
            else if ((stricmp(argv[i],"-d")==0)&&(i+3<argc)) {
                dcat.append(argv[++i]);
                dcomp.append(argv[++i]);
                dinst.append(argv[++i]);
            }
            else if ((stricmp(argv[i],"-ip")==0)&&(i+1<argc)) {
                dip.append(argv[++i]);
            }
            else {
                usage(argv[0]);
                return -1;
            }
        }
        else {
            if (filename.length()) {
                usage(argv[0]);
                return -1;
            }
            filename.append(argv[i]);
        }
    }       

    Owned<IPropertyTree> env;
    try {
        env.setown(createPTreeFromXMLFile(argv[1]));
        if (!env.get()) {
            fprintf(stderr,"Could not load Environment from %s\n",argv[1]);
            return 1;
        }
        const char *s = env->queryName();
        if (!s||(strcmp(s,"Environment")!=0)) {
            fprintf(stderr,"File %s is invalid\n",argv[1]);
            return 1;
        }
    }
    catch (IException *e) {
        StringBuffer err;
        e->errorMessage(err);
        fprintf(stderr,"Could not load Environment from %s, %s\n",argv[1],err.str());
        return 1;
    }
    int ret = 0;
    try {
        if (dcat.length()) {
            IPropertyTree* dirs = env->queryPropTree("Software/Directories");
            StringBuffer dirout;
            if (getConfigurationDirectory(dirs,dcat.str(),dcomp.str(),dinst.str(),dirout)&&dirout.length()) {
                if (dip.length()) {
                    SocketEndpoint ep(dip.str());
                    RemoteFilename rfn;
                    rfn.setPath(ep,dirout.str());
                    rfn.getRemotePath(dirout.clear());
                }
                printf("%s",dirout.str());
            }
            else {
                ret = 1;
            }
        }
        else {
            if (!updateDaliEnv(env, forceGroupUpdate, inst.str()))
                ret = 1;
        }
    }
    catch (IException *e) {
        pexception("updtdalienv",e);
        e->Release();
        ret = 1;
    }
    releaseAtoms();
    return ret;
}
void CachedPasswordProvider::addPasswordForFilename(RemoteFilename & filename)
{
    addPasswordForIp(filename.queryIP());
}
void CachedPasswordProvider::addPasswordForFilename(const char * filename)
{
    RemoteFilename remote;
    remote.setRemotePath(filename);
    addPasswordForIp(remote.queryIP());
}
static void _doReplicate(CActivityBase *activity, IPartDescriptor &partDesc, ICopyFileProgress *iProgress)
{
    StringBuffer primaryName;
    getPartFilename(partDesc, 0, primaryName);
    RemoteFilename rfn;
    IFileDescriptor &fileDesc = partDesc.queryOwner();
    unsigned copies = partDesc.numCopies();
    unsigned c=1;
    for (; c<copies; c++)
    {
        unsigned replicateCopy;
        unsigned clusterNum = partDesc.copyClusterNum(c, &replicateCopy);
        rfn.clear();
        partDesc.getFilename(c, rfn);
        StringBuffer dstName;
        rfn.getPath(dstName);
        assertex(dstName.length());

        if (replicateCopy>0 )  
        {
            try
            {
                queryThor().queryBackup().backup(dstName.str(), primaryName.str());
            }
            catch (IException *e)
            {
                Owned<IThorException> re = MakeActivityWarning(activity, e, "Failed to create replicate file '%s'", dstName.str());
                e->Release();
                activity->fireException(re);
            }
        }
        else // another primary
        {
            ActPrintLog(activity, "Copying to primary %s", dstName.str());
            StringBuffer tmpName(dstName.str());
            tmpName.append(".tmp");
            OwnedIFile tmpIFile = createIFile(tmpName.str());
            OwnedIFile srcFile = createIFile(primaryName.str());
            CFIPScope fipScope(tmpName.str());
            try
            {
                try
                {
                    ensureDirectoryForFile(dstName.str());
                    ::copyFile(tmpIFile, srcFile, 0x100000, iProgress);
                }
                catch (IException *e)
                {
                    IThorException *re = MakeActivityException(activity, e, "Failed to copy to tmp file '%s' from source file '%s'", tmpIFile->queryFilename(), srcFile->queryFilename());
                    e->Release();
                    throw re;
                }
                try
                {
                    OwnedIFile dstIFile = createIFile(dstName.str());
                    dstIFile->remove();
                    tmpIFile->rename(pathTail(dstName.str()));
                }
                catch (IException *e)
                {
                    IThorException *re = ThorWrapException(e, "Failed to rename '%s' to '%s'", tmpName.str(), dstName.str());
                    e->Release();
                    throw re;
                }
            }
            catch (IException *)
            {
                try { tmpIFile->remove(); }
                catch (IException *e) { ActPrintLog(&activity->queryContainer(), e, NULL); e->Release(); }
                throw;
            }
        }
    }
}